 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_GRAPHKIT_HPP
#define SHARE_OPTO_GRAPHKIT_HPP

#include "ci/ciEnv.hpp"
#include "ci/ciMethodData.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/divnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
#include "runtime/deoptimization.hpp"

class BarrierSetC2;
class FastLockNode;
class FastUnlockNode;
class IdealKit;
class LibraryCallKit;
class Parse;
class RootNode;

//-----------------------------------------------------------------------------
//----------------------------GraphKit-----------------------------------------
// Toolkit for building the common sorts of subgraphs.
// Does not know about bytecode parsing or type-flow results.
// It is able to create graphs implementing the semantics of most
// or all bytecodes, so that it can expand intrinsics and calls.
// It may depend on JVMState structure, but it must not depend
// on specific bytecode streams.
class GraphKit : public Phase {
  friend class PreserveJVMState;

 protected:
  ciEnv*            _env;       // Compilation environment
  PhaseGVN         &_gvn;       // Some optimizations while parsing
  SafePointNode*    _map;       // Parser map from JVM to Nodes
  SafePointNode*    _exceptions;// Parser map(s) for exception state(s)
  int               _bci;       // JVM Bytecode Pointer
  ciMethod*         _method;    // JVM Current Method
  BarrierSetC2*     _barrier_set;

 private:
  int               _sp;        // JVM Expression Stack Pointer; don't modify directly!

 private:
  SafePointNode* map_not_null() const {
    assert(_map != nullptr, "must call stopped() to test for reset compiler map");
    return _map;
  }

 public:
  GraphKit();                   // empty constructor
  GraphKit(JVMState* jvms);     // the JVM state on which to operate

#ifdef ASSERT
  ~GraphKit() {
    assert(failing_internal() || !has_exceptions(),
           "unless compilation failed, user must call transfer_exceptions_into_jvms");
  }
#endif

  virtual Parse*          is_Parse()          const { return nullptr; }
  virtual LibraryCallKit* is_LibraryCallKit() const { return nullptr; }

  ciEnv*        env()               const { return _env; }
  PhaseGVN&     gvn()               const { return _gvn; }
  void*         barrier_set_state() const { return C->barrier_set_state(); }

  void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile
  void remove_for_igvn(Node* n) const { C->remove_for_igvn(n); }

  // Handy well-known nodes:
  Node*         null()       const { return zerocon(T_OBJECT); }
  Node*         top()        const { return C->top(); }
  RootNode*     root()       const { return C->root(); }

  // Create or find a constant node
  Node* intcon(jint con)        const { return _gvn.intcon(con); }
  Node* longcon(jlong con)      const { return _gvn.longcon(con); }
  Node* integercon(jlong con, BasicType bt) const {
    if (bt == T_INT) {
      return intcon(checked_cast<jint>(con));
    }
    assert(bt == T_LONG, "basic type not an int or long");
    return longcon(con);
  }
  Node* makecon(const Type *t)  const { return _gvn.makecon(t); }
  Node* zerocon(BasicType bt)   const { return _gvn.zerocon(bt); }
  // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)
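  // Illustrative sketch (not part of the original header): typical use of the
  // constant factories from inside a GraphKit subclass; `bt` is assumed to be
  // either T_INT or T_LONG.
  //
  //   Node* forty_two = integercon(42, bt);        // intcon(42) for T_INT, longcon(42) for T_LONG
  //   Node* nul       = null();                    // same as zerocon(T_OBJECT)
  //   Node* pi        = makecon(TypeF::make(3.14159f));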
  Node* ConvL2I(Node* offset);
  // Find out the klass of an object.
  Node* load_object_klass(Node* object);
  // Find out the length of an array.
  Node* load_array_length(Node* array);
  // Cast array allocation's length as narrow as possible.
  // If replace_length_in_map is true, replace the length with a CastIINode in the map.
  // This method is invoked after creating/moving an AllocateArrayNode, or from load_array_length.
  Node* array_ideal_length(AllocateArrayNode* alloc,
                           const TypeOopPtr* oop_type,
                           bool replace_length_in_map);
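  // Illustrative sketch (not part of the original header): reading the length
  // and klass of an array node `ary` that is already known to be non-null.
  //
  //   Node* len = load_array_length(ary);    // int-typed length node
  //   Node* kls = load_object_klass(ary);    // klass pointer of the array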


  // Helper function to do a null pointer check or ZERO check based on type.
  // Throw an exception if a given value is null.
  // Return the value cast to not-null.
  // Be clever about equivalent dominating null checks.
  Node* null_check_common(Node* value, BasicType type,
                          bool assert_null = false,
                          Node* *null_control = nullptr,
                          bool speculative = false);
  Node* null_check(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type, false, nullptr, !_gvn.type(value)->speculative_maybe_null());
  }
  Node* null_check_receiver() {
    assert(argument(0)->bottom_type()->isa_ptr(), "must be");
    return null_check(argument(0));
  }
  Node* zero_check_int(Node* value) {
    assert(value->bottom_type()->basic_type() == T_INT,
           "wrong type: %s", type2name(value->bottom_type()->basic_type()));
    return null_check_common(value, T_INT);
  }
  Node* zero_check_long(Node* value) {
    assert(value->bottom_type()->basic_type() == T_LONG,
           "wrong type: %s", type2name(value->bottom_type()->basic_type()));
    return null_check_common(value, T_LONG);
  }
  // Throw an uncommon trap if a given value is __not__ null.
  // Return the value cast to null, and be clever about dominating checks.
  Node* null_assert(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type, true, nullptr, _gvn.type(value)->speculative_always_null());
  }
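  // Illustrative sketch (not part of the original header): guarding an integer
  // division during parsing. `dividend` and `divisor` are assumed to be
  // int-typed nodes already in the map; a zero divisor takes the exception
  // path built by null_check_common(), which may stop the current map.
  //
  //   Node* checked  = zero_check_int(divisor);
  //   if (stopped())  return;   // the division is dead on this path
  //   Node* quotient = _gvn.transform(new DivINode(control(), dividend, checked));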

  // Check if value is null and abort if it is
  Node* must_be_not_null(Node* value, bool do_replace_in_map);
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  // Return the new StoreXNode
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        MemNode::MemOrd,
                        bool require_atomic_access = false,
                        bool unaligned = false,
                        bool mismatched = false,
                        bool unsafe = false,
                        int barrier_data = 0);

  // Perform decorated accesses

  Node* access_store_at(Node* obj,   // containing obj
                        Node* adr,   // actual address to store val at
                        const TypePtr* adr_type,
                        Node* val,
                        const Type* val_type,
                        BasicType bt,
                        DecoratorSet decorators);

  Node* access_load_at(Node* obj,   // containing obj
                       Node* adr,   // actual address to load val at
                       const TypePtr* adr_type,
                       const Type* val_type,
                       BasicType bt,
                       DecoratorSet decorators);

  Node* access_load(Node* adr,   // actual address to load val at
                    const Type* val_type,
                    BasicType bt,
                    DecoratorSet decorators);
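  // Illustrative sketch (not part of the original header): a GC-aware load of
  // a plain on-heap reference field through the decorated access API. The
  // decorator choice and the `field`/`obj` names are assumptions; real callers
  // pick decorators that match the access they are emitting.
  //
  //   const TypePtr* adr_type = C->alias_type(field)->adr_type();
  //   Node* adr = basic_plus_adr(obj, obj, field->offset_in_bytes());
  //   Node* val = access_load_at(obj, adr, adr_type, TypeInstPtr::BOTTOM,
  //                              T_OBJECT, IN_HEAP | MO_UNORDERED);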

  Node* access_atomic_cmpxchg_val_at(Node* obj,
                                     Node* adr,
                                     const TypePtr* adr_type,
                                     int alias_idx,
                                     Node* expected_val,
                                     Node* new_val,
                                     const Type* value_type,
                                     BasicType bt,
                                     DecoratorSet decorators);

  Node* access_atomic_cmpxchg_bool_at(Node* obj,
                                      Node* adr,
                                      const TypePtr* adr_type,
                                      int alias_idx,
                                      bool return_pc);

  //---------- help for generating calls --------------

  // Do a null check on the receiver as it would happen before the call to
  // callee (with all arguments still on the stack).
  Node* null_check_receiver_before_call(ciMethod* callee) {
    assert(!callee->is_static(), "must be a virtual method");
    // Callsite signature can be different from actual method being called (i.e. _linkTo* sites).
    // Always use the callsite signature.
    ciMethod* declared_method = method()->get_method_at_bci(bci());
    const int nargs = declared_method->arg_size();
    inc_sp(nargs);
    Node* n = null_check_receiver();
    dec_sp(nargs);
    return n;
  }

  // Fill in argument edges for the call from argument(0), argument(1), ...
  // (The next step is to call set_edges_for_java_call.)
  void  set_arguments_for_java_call(CallJavaNode* call);

  // Fill in non-argument edges for the call.
  // Transform the call, and update the basics: control, i_o, memory.
  // (The next step is usually to call set_results_for_java_call.)
  void  set_edges_for_java_call(CallJavaNode* call,
                                bool must_throw = false, bool separate_io_proj = false);

  // Finish up a java call that was started by set_edges_for_java_call.
  // Call add_exception on any throw arising from the call.
  // Return the call result (transformed).
  Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false, bool deoptimize = false);
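  // Illustrative sketch (not part of the original header): the usual three-step
  // sequence for emitting a Java call once a CallJavaNode `call` has been
  // allocated (the `call` name is an assumption).
  //
  //   set_arguments_for_java_call(call);               // wire argument edges from argument(0..n)
  //   set_edges_for_java_call(call);                   // control/i_o/memory edges; transforms the call
  //   Node* result = set_results_for_java_call(call);  // capture result, route any exception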

  // Similar to set_edges_for_java_call, but simplified for runtime calls.
  void  set_predefined_output_for_runtime_call(Node* call) {
    set_predefined_output_for_runtime_call(call, nullptr, nullptr);
  }
  void  set_predefined_output_for_runtime_call(Node* call,
                                               Node* keep_mem,
                                               const TypePtr* hook_mem);
  Node* set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem = nullptr);
  void merge_memory(Node* new_mem, Node* region, int new_path);
  void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj, bool deoptimize = false);

  // Helper functions to build synchronizations
  int next_monitor();
  Node* insert_mem_bar(int opcode, Node* precedent = nullptr);
  Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = nullptr);
  // Optional 'precedent' is appended as an extra edge, to force ordering.
  FastLockNode* shared_lock(Node* obj);
  void shared_unlock(Node* box, Node* obj);

  // helper functions for the fast path/slow path idioms
  Node* fast_and_slow(Node* in, const Type *result_type, Node* null_result, IfNode* fast_test, Node* fast_result, address slow_call, const TypeFunc *slow_call_type, Node* slow_arg, Klass* ex_klass, Node* slow_result);

  // Generate an instance-of idiom. Used by both the instance-of bytecode
  // and the reflective instance-of call.
  Node* gen_instanceof(Node *subobj, Node* superkls, bool safe_for_replace = false);

  // Generate a check-cast idiom. Used by both the check-cast bytecode
  // and the array-store bytecode
  Node* gen_checkcast( Node *subobj, Node* superkls,
                       Node* *failure_control = nullptr );

  Node* gen_subtype_check(Node* obj, Node* superklass);

  // Exact type check used for predicted calls and casts.
  // Rewrites (*casted_receiver) to be casted to the stronger type.
  // (Caller is responsible for doing replace_in_map.)
  Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                            Node* *casted_receiver);

  // Inexact type check used for predicted calls.
  Node* subtype_check_receiver(Node* receiver, ciKlass* klass,
                               Node** casted_receiver);

  // implementation of object creation
  Node* set_output_for_allocation(AllocateNode* alloc,
                                  const TypeOopPtr* oop_type,
                                  bool deoptimize_on_exception=false);
  Node* get_layout_helper(Node* klass_node, jint& constant_value);
  Node* new_instance(Node* klass_node,
                     Node* slow_test = nullptr,
                     Node* *return_size_val = nullptr,
                     bool deoptimize_on_exception = false);
  Node* new_array(Node* klass_node, Node* count_val, int nargs,
                  Node* *return_size_val = nullptr,
                  bool deoptimize_on_exception = false);
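  // Illustrative sketch (not part of the original header): allocating a new
  // array with length `count` during parsing; `nargs` is the number of stack
  // slots to re-execute if the allocation deoptimizes, and `tak` is assumed to
  // be a constant TypeKlassPtr for the array type.
  //
  //   Node* klass_node = makecon(tak);
  //   Node* obj        = new_array(klass_node, count, nargs);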

  // java.lang.String helpers
  Node* load_String_length(Node* str, bool set_ctrl);
  Node* load_String_value(Node* str, bool set_ctrl);
  Node* load_String_coder(Node* str, bool set_ctrl);
  void store_String_value(Node* str, Node* value);
  void store_String_coder(Node* str, Node* value);
  Node* capture_memory(const TypePtr* src_type, const TypePtr* dst_type);
  Node* compress_string(Node* src, const TypeAryPtr* src_type, Node* dst, Node* count);
  void inflate_string(Node* src, Node* dst, const TypeAryPtr* dst_type, Node* count);
  void inflate_string_slow(Node* src, Node* dst, Node* start, Node* count);

  // Handy for making control flow
  IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new IfNode(ctrl, tst, prob, cnt); // New IfNode's
    _gvn.set_type(iff, iff->Value(&_gvn));          // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con()) record_for_igvn(iff);       // Range-check and Null-check removal is later
    return iff;
  }

  IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new IfNode(ctrl, tst, prob, cnt); // New IfNode's
    _gvn.transform(iff);                            // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con()) record_for_igvn(iff);       // Range-check and Null-check removal is later
    return iff;
  }
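  // Illustrative sketch (not part of the original header): building a two-way
  // branch on `a < b` and picking up its projections. The Bool/Cmp
  // construction and the probability constants are assumptions.
  //
  //   Node* cmp = _gvn.transform(new CmpINode(a, b));
  //   Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::lt));
  //   IfNode* iff = create_and_map_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
  //   Node* if_true  = _gvn.transform(new IfTrueNode(iff));
  //   Node* if_false = _gvn.transform(new IfFalseNode(iff));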

  void add_parse_predicates(int nargs = 0);
  void add_parse_predicate(Deoptimization::DeoptReason reason, int nargs);

  Node* make_constant_from_field(ciField* field, Node* obj);

  // Vector API support (implemented in vectorIntrinsics.cpp)
  Node* box_vector(Node* in, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem, bool deoptimize_on_exception = false);
  Node* unbox_vector(Node* in, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem);
  Node* vector_shift_count(Node* cnt, int shift_op, BasicType bt, int num_elem);
};

// Helper class to support building of control flow branches. Upon
// creation the map and sp at bci are cloned and restored upon de-
// struction. Typical use:
//
// { PreserveJVMState pjvms(this);
//   // code of new branch
// }
// // here the JVM state at bci is established

class PreserveJVMState: public StackObj {
 protected:
  GraphKit*      _kit;
#ifdef ASSERT