16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_OPTO_GRAPHKIT_HPP
26 #define SHARE_OPTO_GRAPHKIT_HPP
27
28 #include "ci/ciEnv.hpp"
29 #include "ci/ciMethodData.hpp"
30 #include "gc/shared/c2/barrierSetC2.hpp"
31 #include "opto/addnode.hpp"
32 #include "opto/callnode.hpp"
33 #include "opto/cfgnode.hpp"
34 #include "opto/compile.hpp"
35 #include "opto/divnode.hpp"
36 #include "opto/mulnode.hpp"
37 #include "opto/phaseX.hpp"
38 #include "opto/subnode.hpp"
39 #include "opto/type.hpp"
40 #include "runtime/deoptimization.hpp"
41
42 class BarrierSetC2;
43 class FastLockNode;
44 class FastUnlockNode;
45 class IdealKit;
46 class LibraryCallKit;
47 class Parse;
48 class RootNode;
49
50 //-----------------------------------------------------------------------------
51 //----------------------------GraphKit-----------------------------------------
52 // Toolkit for building the common sorts of subgraphs.
53 // Does not know about bytecode parsing or type-flow results.
54 // It is able to create graphs implementing the semantics of most
55 // or all bytecodes, so that it can expand intrinsics and calls.
56 // It may depend on JVMState structure, but it must not depend
57 // on specific bytecode streams.
58 class GraphKit : public Phase {
59 friend class PreserveJVMState;
60
61 protected:
62 ciEnv* _env; // Compilation environment
63 PhaseGVN &_gvn; // Some optimizations while parsing
64 SafePointNode* _map; // Parser map from JVM to Nodes
65 SafePointNode* _exceptions;// Parser map(s) for exception state(s)
66 int _bci; // JVM Bytecode Pointer
67 ciMethod* _method; // JVM Current Method
68 BarrierSetC2* _barrier_set;
69
70 private:
71 int _sp; // JVM Expression Stack Pointer; don't modify directly!
72
73 private:
74 SafePointNode* map_not_null() const {
75 assert(_map != nullptr, "must call stopped() to test for reset compiler map");
76 return _map;
77 }
78
79 public:
80 GraphKit(); // empty constructor
81 GraphKit(JVMState* jvms); // the JVM state on which to operate
82
83 #ifdef ASSERT
84 ~GraphKit() {
85 assert(failing_internal() || !has_exceptions(),
86 "unless compilation failed, user must call transfer_exceptions_into_jvms");
87 }
88 #endif
89
90 virtual Parse* is_Parse() const { return nullptr; }
91 virtual LibraryCallKit* is_LibraryCallKit() const { return nullptr; }
92
93 ciEnv* env() const { return _env; }
94 PhaseGVN& gvn() const { return _gvn; }
95 void* barrier_set_state() const { return C->barrier_set_state(); }
96
97 void record_for_igvn(Node* n) const { C->record_for_igvn(n); } // delegate to Compile
98 void remove_for_igvn(Node* n) const { C->remove_for_igvn(n); }
99
100 // Handy well-known nodes:
101 Node* null() const { return zerocon(T_OBJECT); }
102 Node* top() const { return C->top(); }
103 RootNode* root() const { return C->root(); }
104
105 // Create or find a constant node
106 Node* intcon(jint con) const { return _gvn.intcon(con); }
107 Node* longcon(jlong con) const { return _gvn.longcon(con); }
108 Node* integercon(jlong con, BasicType bt) const {
109 if (bt == T_INT) {
110 return intcon(checked_cast<jint>(con));
111 }
112 assert(bt == T_LONG, "basic type not an int or long");
113 return longcon(con);
114 }
115 Node* makecon(const Type *t) const { return _gvn.makecon(t); }
116 Node* zerocon(BasicType bt) const { return _gvn.zerocon(bt); }
117 // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)
337 Node* MaxI(Node* l, Node* r) { return _gvn.transform(new MaxINode(l, r)); }
338 Node* MinI(Node* l, Node* r) { return _gvn.transform(new MinINode(l, r)); }
339
340 Node* LShiftI(Node* l, Node* r) { return _gvn.transform(new LShiftINode(l, r)); }
341 Node* RShiftI(Node* l, Node* r) { return _gvn.transform(new RShiftINode(l, r)); }
342 Node* URShiftI(Node* l, Node* r) { return _gvn.transform(new URShiftINode(l, r)); }
343
344 Node* CmpI(Node* l, Node* r) { return _gvn.transform(new CmpINode(l, r)); }
345 Node* CmpL(Node* l, Node* r) { return _gvn.transform(new CmpLNode(l, r)); }
346 Node* CmpP(Node* l, Node* r) { return _gvn.transform(new CmpPNode(l, r)); }
347 Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new BoolNode(cmp, relop)); }
348
349 Node* AddP(Node* b, Node* a, Node* o) { return _gvn.transform(new AddPNode(b, a, o)); }
350
351 // Convert between int and long, and size_t.
352 // (See the ConvI2X family of macros in type.hpp.)
353 Node* ConvI2L(Node* offset);
354 Node* ConvI2UL(Node* offset);
355 Node* ConvL2I(Node* offset);
356 // Find out the klass of an object.
357 Node* load_object_klass(Node* object);
358 // Find out the length of an array.
359 Node* load_array_length(Node* array);
360 // Cast array allocation's length as narrow as possible.
361 // If replace_length_in_map is true, replace length with CastIINode in map.
362 // This method is invoked after creating/moving an AllocateArrayNode, or from load_array_length.
363 Node* array_ideal_length(AllocateArrayNode* alloc,
364 const TypeOopPtr* oop_type,
365 bool replace_length_in_map);
366
367
368 // Helper function to do a null pointer check or ZERO check based on type.
369 // Throw an exception if a given value is null.
370 // Return the value cast to not-null.
371 // Be clever about equivalent dominating null checks.
372 Node* null_check_common(Node* value, BasicType type,
373 bool assert_null = false,
374 Node* *null_control = nullptr,
375 bool speculative = false);
376 Node* null_check(Node* value, BasicType type = T_OBJECT) {
377 return null_check_common(value, type, false, nullptr, !_gvn.type(value)->speculative_maybe_null());
378 }
379 Node* null_check_receiver() {
380 assert(argument(0)->bottom_type()->isa_ptr(), "must be");
381 return null_check(argument(0));
382 }
383 Node* zero_check_int(Node* value) {
384 assert(value->bottom_type()->basic_type() == T_INT,
385 "wrong type: %s", type2name(value->bottom_type()->basic_type()));
386 return null_check_common(value, T_INT);
387 }
388 Node* zero_check_long(Node* value) {
389 assert(value->bottom_type()->basic_type() == T_LONG,
390 "wrong type: %s", type2name(value->bottom_type()->basic_type()));
391 return null_check_common(value, T_LONG);
392 }
393 // Throw an uncommon trap if a given value is __not__ null.
394 // Return the value cast to null, and be clever about dominating checks.
395 Node* null_assert(Node* value, BasicType type = T_OBJECT) {
396 return null_check_common(value, type, true, nullptr, _gvn.type(value)->speculative_always_null());
397 }
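// Typical use of the checks above (illustrative sketch only; control() and stopped()
// are declared in an elided part of this header, DivINode comes from divnode.hpp):
//
//   Node* divisor  = zero_check_int(pop());   // traps if the divisor is zero
//   Node* dividend = pop();
//   if (!stopped()) {                         // the path may have died at the check
//     push(_gvn.transform(new DivINode(control(), dividend, divisor)));
//   }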
398
399 // Check if value is null and abort if it is
400 Node* must_be_not_null(Node* value, bool do_replace_in_map);
432 // record type from profiling with the type system
433 Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind);
434 void record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc);
435 void record_profiled_parameters_for_speculation();
436 void record_profiled_return_for_speculation();
437 Node* record_profiled_receiver_for_speculation(Node* n);
438
439 // Use the type profile to narrow an object type.
440 Node* maybe_cast_profiled_receiver(Node* not_null_obj,
441 const TypeKlassPtr* require_klass,
442 ciKlass* spec,
443 bool safe_for_replace);
444
445 // Cast obj to type and emit guard unless we had too many traps here already
446 Node* maybe_cast_profiled_obj(Node* obj,
447 ciKlass* type,
448 bool not_null = false);
449
450 // Cast obj to not-null on this path
451 Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
452 // Replace all occurrences of one node by another.
453 void replace_in_map(Node* old, Node* neww);
454
455 Node* maybe_narrow_object_type(Node* obj, ciKlass* type);
456
457 void push(Node* n) { map_not_null(); _map->set_stack(_map->_jvms, _sp++ , n); }
458 Node* pop() { map_not_null(); return _map->stack( _map->_jvms, --_sp ); }
459 Node* peek(int off = 0) { map_not_null(); return _map->stack( _map->_jvms, _sp - off - 1 ); }
460
461 void push_pair(Node* ldval) {
462 push(ldval);
463 push(top()); // the halfword is merely a placeholder
464 }
465 void push_pair_local(int i) {
466 // longs are stored in locals in "push" order
467 push( local(i+0) ); // the real value
468 assert(local(i+1) == top(), "");
469 push(top()); // halfword placeholder
470 }
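// Two-word values keep the usual JVM stack layout; an illustrative sketch using
// push_pair() above and pop_pair() below (AddLNode comes from the included addnode.hpp):
//
//   Node* l = pop_pair();                           // pops the placeholder, then the value
//   Node* r = longcon(42);
//   push_pair(_gvn.transform(new AddLNode(l, r)));  // result plus its top() placeholder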
471 Node* pop_pair() {
562 // procedure must indicate that the store requires `release'
563 // semantics, if the stored value is an object reference that might
564 // point to a new object and may become externally visible.
565 // Return the new StoreXNode
566 Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
567 MemNode::MemOrd,
568 bool require_atomic_access = false,
569 bool unaligned = false,
570 bool mismatched = false,
571 bool unsafe = false,
572 int barrier_data = 0);
573
574 // Perform decorated accesses
575
576 Node* access_store_at(Node* obj, // containing obj
577 Node* adr, // actual address to store val at
578 const TypePtr* adr_type,
579 Node* val,
580 const Type* val_type,
581 BasicType bt,
582 DecoratorSet decorators);
583
584 Node* access_load_at(Node* obj, // containing obj
585 Node* adr, // actual address to load val at
586 const TypePtr* adr_type,
587 const Type* val_type,
588 BasicType bt,
589 DecoratorSet decorators);
590
591 Node* access_load(Node* adr, // actual address to load val at
592 const Type* val_type,
593 BasicType bt,
594 DecoratorSet decorators);
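// Sketch of a decorated field store and load (illustrative only; obj, offset, val,
// val_type and adr_type are hypothetical locals of the caller, basic_plus_adr() is
// declared in an elided part of this header, and IN_HEAP | MO_UNORDERED are the usual
// access decorators -- the BarrierSetC2 expands whatever GC barriers are needed):
//
//   Node* adr = basic_plus_adr(obj, offset);     // address of the field inside obj
//   access_store_at(obj, adr, adr_type, val, val_type, T_OBJECT, IN_HEAP | MO_UNORDERED);
//   Node* ld = access_load_at(obj, adr, adr_type, val_type, T_OBJECT, IN_HEAP | MO_UNORDERED);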
595
596 Node* access_atomic_cmpxchg_val_at(Node* obj,
597 Node* adr,
598 const TypePtr* adr_type,
599 int alias_idx,
600 Node* expected_val,
601 Node* new_val,
602 const Type* value_type,
603 BasicType bt,
604 DecoratorSet decorators);
605
606 Node* access_atomic_cmpxchg_bool_at(Node* obj,
607 Node* adr,
608 const TypePtr* adr_type,
609 int alias_idx,
622 BasicType bt,
623 DecoratorSet decorators);
624
625 Node* access_atomic_add_at(Node* obj,
626 Node* adr,
627 const TypePtr* adr_type,
628 int alias_idx,
629 Node* new_val,
630 const Type* value_type,
631 BasicType bt,
632 DecoratorSet decorators);
633
634 void access_clone(Node* src, Node* dst, Node* size, bool is_array);
635
636 // Return addressing for an array element.
637 Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
638 // Optional constraint on the array size:
639 const TypeInt* sizetype = nullptr,
640 // Optional control dependency (for example, on range check)
641 Node* ctrl = nullptr);
642
643 // Return a load of array element at idx.
644 Node* load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl);
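// Sketch of an element load via the helpers above (illustrative; the range check that
// usually supplies the control dependency is elided):
//
//   const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
//   Node* elem = load_array_element(ary, idx, arytype, /*set_ctrl=*/false);
//
// When only the raw address is needed, array_element_address(ary, idx, elembt) forms
// the equivalent ary + header_offset + idx * element_size pointer.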
645
646 //---------------- Dtrace support --------------------
647 void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
648 void make_dtrace_method_entry(ciMethod* method) {
649 make_dtrace_method_entry_exit(method, true);
650 }
651 void make_dtrace_method_exit(ciMethod* method) {
652 make_dtrace_method_entry_exit(method, false);
653 }
654
655 //--------------- stub generation -------------------
656 public:
657 void gen_stub(address C_function,
658 const char *name,
659 int is_fancy_jump,
660 bool pass_tls,
661 bool return_pc);
662
663 //---------- help for generating calls --------------
664
665 // Do a null check on the receiver as it would happen before the call to
666 // callee (with all arguments still on the stack).
667 Node* null_check_receiver_before_call(ciMethod* callee) {
668 assert(!callee->is_static(), "must be a virtual method");
669 // The call site signature can differ from that of the actual method being called (e.g., _linkTo* sites).
670 // Always use the call site signature.
671 ciMethod* declared_method = method()->get_method_at_bci(bci());
672 const int nargs = declared_method->arg_size();
673 inc_sp(nargs);
674 Node* n = null_check_receiver();
675 dec_sp(nargs);
676 return n;
677 }
678
679 // Fill in argument edges for the call from argument(0), argument(1), ...
680 // (The next step is to call set_edges_for_java_call.)
681 void set_arguments_for_java_call(CallJavaNode* call);
682
683 // Fill in non-argument edges for the call.
684 // Transform the call, and update the basics: control, i_o, memory.
685 // (The next step is usually to call set_results_for_java_call.)
686 void set_edges_for_java_call(CallJavaNode* call,
687 bool must_throw = false, bool separate_io_proj = false);
688
689 // Finish up a java call that was started by set_edges_for_java_call.
690 // Call add_exception on any throw arising from the call.
691 // Return the call result (transformed).
692 Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false, bool deoptimize = false);
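// The three helpers above are normally used in sequence (illustrative sketch; creating
// the CallJavaNode itself and the exception plumbing are elided):
//
//   set_arguments_for_java_call(call);            // copy argument(0..n-1) onto the call
//   set_edges_for_java_call(call);                // hook up control, i_o and memory
//   Node* result = set_results_for_java_call(call);
//   // ... push 'result' according to the callee's return type, handle any exceptions.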
693
694 // Similar to set_edges_for_java_call, but simplified for runtime calls.
695 void set_predefined_output_for_runtime_call(Node* call) {
696 set_predefined_output_for_runtime_call(call, nullptr, nullptr);
697 }
698 void set_predefined_output_for_runtime_call(Node* call,
699 Node* keep_mem,
700 const TypePtr* hook_mem);
701 Node* set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem = nullptr);
791 void merge_memory(Node* new_mem, Node* region, int new_path);
792 void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj, bool deoptimize = false);
793
794 // Helper functions to build synchronizations
795 int next_monitor();
796 Node* insert_mem_bar(int opcode, Node* precedent = nullptr);
797 Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = nullptr);
798 // Optional 'precedent' is appended as an extra edge, to force ordering.
799 FastLockNode* shared_lock(Node* obj);
800 void shared_unlock(Node* box, Node* obj);
801
802 // helper functions for the fast path/slow path idioms
803 Node* fast_and_slow(Node* in, const Type *result_type, Node* null_result, IfNode* fast_test, Node* fast_result, address slow_call, const TypeFunc *slow_call_type, Node* slow_arg, Klass* ex_klass, Node* slow_result);
804
805 // Generate an instance-of idiom. Used by both the instance-of bytecode
806 // and the reflective instance-of call.
807 Node* gen_instanceof(Node *subobj, Node* superkls, bool safe_for_replace = false);
808
809 // Generate a check-cast idiom. Used by both the check-cast bytecode
810 // and the array-store bytecode
811 Node* gen_checkcast( Node *subobj, Node* superkls,
812 Node* *failure_control = nullptr );
813
814 Node* gen_subtype_check(Node* obj, Node* superklass);
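// Both idioms take a klass node for the super type, and gen_checkcast() is built on the
// raw gen_subtype_check() test (illustrative sketch; tklass is a hypothetical
// compile-time ciKlass constant):
//
//   Node* superkls = makecon(TypeKlassPtr::make(tklass));
//   Node* casted   = gen_checkcast(obj, superkls);    // throws/deopts on failure
//   Node* is_inst  = gen_instanceof(obj, superkls);   // 0/1 result for instanceof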
815
816 // Exact type check used for predicted calls and casts.
817 // Rewrites (*casted_receiver) to be cast to the stronger type.
818 // (Caller is responsible for doing replace_in_map.)
819 Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
820 Node* *casted_receiver);
821
822 // Inexact type check used for predicted calls.
823 Node* subtype_check_receiver(Node* receiver, ciKlass* klass,
824 Node** casted_receiver);
825
826 // implementation of object creation
827 Node* set_output_for_allocation(AllocateNode* alloc,
828 const TypeOopPtr* oop_type,
829 bool deoptimize_on_exception=false);
830 Node* get_layout_helper(Node* klass_node, jint& constant_value);
831 Node* new_instance(Node* klass_node,
832 Node* slow_test = nullptr,
833 Node* *return_size_val = nullptr,
834 bool deoptimize_on_exception = false);
835 Node* new_array(Node* klass_node, Node* count_val, int nargs,
836 Node* *return_size_val = nullptr,
837 bool deoptimize_on_exception = false);
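// Allocation sketch (illustrative; array_klass is a hypothetical ciKlass constant and
// nargs is the number of stack arguments to restore if the allocation path deoptimizes):
//
//   Node* klass_node = makecon(TypeKlassPtr::make(array_klass));
//   Node* ary = new_array(klass_node, intcon(len), nargs);
//   // new_instance() follows the same pattern with an instance klass and no length.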
838
839 // java.lang.String helpers
840 Node* load_String_length(Node* str, bool set_ctrl);
841 Node* load_String_value(Node* str, bool set_ctrl);
842 Node* load_String_coder(Node* str, bool set_ctrl);
843 void store_String_value(Node* str, Node* value);
844 void store_String_coder(Node* str, Node* value);
845 Node* capture_memory(const TypePtr* src_type, const TypePtr* dst_type);
846 Node* compress_string(Node* src, const TypeAryPtr* src_type, Node* dst, Node* count);
847 void inflate_string(Node* src, Node* dst, const TypeAryPtr* dst_type, Node* count);
848 void inflate_string_slow(Node* src, Node* dst, Node* start, Node* count);
849
850 // Handy for making control flow
851 IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
852 IfNode* iff = new IfNode(ctrl, tst, prob, cnt);  // new IfNode
853 _gvn.set_type(iff, iff->Value(&_gvn)); // Value may be known at parse-time
854 // Place 'if' on worklist if it will be in graph
855 if (!tst->is_Con()) record_for_igvn(iff); // Range-check and Null-check removal is later
856 return iff;
857 }
858
859 IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
860 IfNode* iff = new IfNode(ctrl, tst, prob, cnt);  // new IfNode
861 _gvn.transform(iff); // Value may be known at parse-time
862 // Place 'if' on worklist if it will be in graph
863 if (!tst->is_Con()) record_for_igvn(iff); // Range-check and Null-check removal is later
864 return iff;
865 }
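// Building a two-way branch with the helpers above (illustrative sketch; set_control()
// and the PROB_FAIR/COUNT_UNKNOWN constants are assumed from elsewhere in C2):
//
//   Node* bol = Bool(CmpI(value, intcon(0)), BoolTest::lt);
//   IfNode* iff = create_and_xform_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
//   Node* if_true  = _gvn.transform(new IfTrueNode(iff));    // value <  0 path
//   Node* if_false = _gvn.transform(new IfFalseNode(iff));   // value >= 0 path
//   set_control(if_true);   // build one arm here, merge with a RegionNode later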
866
867 void add_parse_predicates(int nargs = 0);
868 void add_parse_predicate(Deoptimization::DeoptReason reason, int nargs);
869
870 Node* make_constant_from_field(ciField* field, Node* obj);
871
872 // Vector API support (implemented in vectorIntrinsics.cpp)
873 Node* box_vector(Node* in, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem, bool deoptimize_on_exception = false);
874 Node* unbox_vector(Node* in, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem);
875 Node* vector_shift_count(Node* cnt, int shift_op, BasicType bt, int num_elem);
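// Typical intrinsic use of the vector helpers above (illustrative sketch; vbox_type,
// elem_bt, num_elem and arg_idx are hypothetical values taken from the intrinsic's
// signature, and set_result() belongs to LibraryCallKit, not GraphKit):
//
//   Node* v     = unbox_vector(argument(arg_idx), vbox_type, elem_bt, num_elem);
//   Node* res   = /* ... some VectorNode computed from v ... */;
//   Node* boxed = box_vector(res, vbox_type, elem_bt, num_elem);
//   set_result(boxed);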
876 };
877
878 // Helper class to support building of control flow branches. Upon
879 // creation, the map and sp at bci are cloned; they are restored upon
880 // destruction. Typical use:
881 //
882 // { PreserveJVMState pjvms(this);
883 // // code of new branch
884 // }
885 // // here the JVM state at bci is established
886
887 class PreserveJVMState: public StackObj {
888 protected:
889 GraphKit* _kit;
890 #ifdef ASSERT
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_OPTO_GRAPHKIT_HPP
26 #define SHARE_OPTO_GRAPHKIT_HPP
27
28 #include "ci/ciEnv.hpp"
29 #include "ci/ciMethodData.hpp"
30 #include "gc/shared/c2/barrierSetC2.hpp"
31 #include "opto/addnode.hpp"
32 #include "opto/callnode.hpp"
33 #include "opto/cfgnode.hpp"
34 #include "opto/compile.hpp"
35 #include "opto/divnode.hpp"
36 #include "opto/inlinetypenode.hpp"
37 #include "opto/mulnode.hpp"
38 #include "opto/phaseX.hpp"
39 #include "opto/subnode.hpp"
40 #include "opto/type.hpp"
41 #include "runtime/deoptimization.hpp"
42
43 class BarrierSetC2;
44 class FastLockNode;
45 class FastUnlockNode;
46 class IdealKit;
47 class LibraryCallKit;
48 class Parse;
49 class RootNode;
50
51 //-----------------------------------------------------------------------------
52 //----------------------------GraphKit-----------------------------------------
53 // Toolkit for building the common sorts of subgraphs.
54 // Does not know about bytecode parsing or type-flow results.
55 // It is able to create graphs implementing the semantics of most
56 // or all bytecodes, so that it can expand intrinsics and calls.
57 // It may depend on JVMState structure, but it must not depend
58 // on specific bytecode streams.
59 class GraphKit : public Phase {
60 friend class PreserveJVMState;
61
62 protected:
63 ciEnv* _env; // Compilation environment
64 PhaseGVN &_gvn; // Some optimizations while parsing
65 SafePointNode* _map; // Parser map from JVM to Nodes
66 SafePointNode* _exceptions;// Parser map(s) for exception state(s)
67 int _bci; // JVM Bytecode Pointer
68 ciMethod* _method; // JVM Current Method
69 BarrierSetC2* _barrier_set;
70 #ifdef ASSERT
71 uint _worklist_size;
72 #endif
73
74 private:
75 int _sp; // JVM Expression Stack Pointer; don't modify directly!
76
77 private:
78 SafePointNode* map_not_null() const {
79 assert(_map != nullptr, "must call stopped() to test for reset compiler map");
80 return _map;
81 }
82
83 public:
84 GraphKit(); // empty constructor
85 GraphKit(JVMState* jvms, PhaseGVN* gvn = nullptr); // the JVM state on which to operate
86
87 #ifdef ASSERT
88 ~GraphKit() {
89 assert(failing_internal() || !has_exceptions(),
90 "unless compilation failed, user must call transfer_exceptions_into_jvms");
91 #if 0
92 // During incremental inlining, the Node_Array of the C->for_igvn() worklist and the IGVN
93 // worklist are shared but the _in_worklist VectorSet is not. To avoid inconsistencies,
94 // we should not add nodes to the _for_igvn worklist when using IGVN for the GraphKit.
95 assert((_gvn.is_IterGVN() == nullptr) || (_gvn.C->for_igvn()->size() == _worklist_size),
96 "GraphKit should not modify _for_igvn worklist after parsing");
97 #endif
98 }
99 #endif
100
101 virtual Parse* is_Parse() const { return nullptr; }
102 virtual LibraryCallKit* is_LibraryCallKit() const { return nullptr; }
103
104 ciEnv* env() const { return _env; }
105 PhaseGVN& gvn() const { return _gvn; }
106 void* barrier_set_state() const { return C->barrier_set_state(); }
107
108 void record_for_igvn(Node* n) const { _gvn.record_for_igvn(n); }
109 void remove_for_igvn(Node* n) const { C->remove_for_igvn(n); }
110
111 // Handy well-known nodes:
112 Node* null() const { return zerocon(T_OBJECT); }
113 Node* top() const { return C->top(); }
114 RootNode* root() const { return C->root(); }
115
116 // Create or find a constant node
117 Node* intcon(jint con) const { return _gvn.intcon(con); }
118 Node* longcon(jlong con) const { return _gvn.longcon(con); }
119 Node* integercon(jlong con, BasicType bt) const {
120 if (bt == T_INT) {
121 return intcon(checked_cast<jint>(con));
122 }
123 assert(bt == T_LONG, "basic type not an int or long");
124 return longcon(con);
125 }
126 Node* makecon(const Type *t) const { return _gvn.makecon(t); }
127 Node* zerocon(BasicType bt) const { return _gvn.zerocon(bt); }
128 // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)
348 Node* MaxI(Node* l, Node* r) { return _gvn.transform(new MaxINode(l, r)); }
349 Node* MinI(Node* l, Node* r) { return _gvn.transform(new MinINode(l, r)); }
350
351 Node* LShiftI(Node* l, Node* r) { return _gvn.transform(new LShiftINode(l, r)); }
352 Node* RShiftI(Node* l, Node* r) { return _gvn.transform(new RShiftINode(l, r)); }
353 Node* URShiftI(Node* l, Node* r) { return _gvn.transform(new URShiftINode(l, r)); }
354
355 Node* CmpI(Node* l, Node* r) { return _gvn.transform(new CmpINode(l, r)); }
356 Node* CmpL(Node* l, Node* r) { return _gvn.transform(new CmpLNode(l, r)); }
357 Node* CmpP(Node* l, Node* r) { return _gvn.transform(new CmpPNode(l, r)); }
358 Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new BoolNode(cmp, relop)); }
359
360 Node* AddP(Node* b, Node* a, Node* o) { return _gvn.transform(new AddPNode(b, a, o)); }
361
362 // Convert between int and long, and size_t.
363 // (See the ConvI2X family of macros in type.hpp.)
364 Node* ConvI2L(Node* offset);
365 Node* ConvI2UL(Node* offset);
366 Node* ConvL2I(Node* offset);
367 // Find out the klass of an object.
368 Node* load_object_klass(Node* object, bool fold_for_arrays = true);
369 // Find out the length of an array.
370 Node* load_array_length(Node* array);
371 // Cast array allocation's length as narrow as possible.
372 // If replace_length_in_map is true, replace length with CastIINode in map.
373 // This method is invoked after creating/moving an AllocateArrayNode, or from load_array_length.
374 Node* array_ideal_length(AllocateArrayNode* alloc,
375 const TypeOopPtr* oop_type,
376 bool replace_length_in_map);
377
378
379 // Helper function to do a null pointer check or ZERO check based on type.
380 // Throw an exception if a given value is null.
381 // Return the value cast to not-null.
382 // Be clever about equivalent dominating null checks.
383 Node* null_check_common(Node* value, BasicType type,
384 bool assert_null = false,
385 Node* *null_control = nullptr,
386 bool speculative = false,
387 bool is_init_check = false);
388 Node* null_check(Node* value, BasicType type = T_OBJECT) {
389 return null_check_common(value, type, false, nullptr, !_gvn.type(value)->speculative_maybe_null());
390 }
391 Node* null_check_receiver() {
392 return null_check(argument(0));
393 }
394 Node* zero_check_int(Node* value) {
395 assert(value->bottom_type()->basic_type() == T_INT,
396 "wrong type: %s", type2name(value->bottom_type()->basic_type()));
397 return null_check_common(value, T_INT);
398 }
399 Node* zero_check_long(Node* value) {
400 assert(value->bottom_type()->basic_type() == T_LONG,
401 "wrong type: %s", type2name(value->bottom_type()->basic_type()));
402 return null_check_common(value, T_LONG);
403 }
404 // Throw an uncommon trap if a given value is __not__ null.
405 // Return the value cast to null, and be clever about dominating checks.
406 Node* null_assert(Node* value, BasicType type = T_OBJECT) {
407 return null_check_common(value, type, true, nullptr, _gvn.type(value)->speculative_always_null());
408 }
409
410 // Check if value is null and abort if it is
411 Node* must_be_not_null(Node* value, bool do_replace_in_map);
443 // record type from profiling with the type system
444 Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind);
445 void record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc);
446 void record_profiled_parameters_for_speculation();
447 void record_profiled_return_for_speculation();
448 Node* record_profiled_receiver_for_speculation(Node* n);
449
450 // Use the type profile to narrow an object type.
451 Node* maybe_cast_profiled_receiver(Node* not_null_obj,
452 const TypeKlassPtr* require_klass,
453 ciKlass* spec,
454 bool safe_for_replace);
455
456 // Cast obj to type and emit guard unless we had too many traps here already
457 Node* maybe_cast_profiled_obj(Node* obj,
458 ciKlass* type,
459 bool not_null = false);
460
461 // Cast obj to not-null on this path
462 Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
463 // If a larval object appears multiple times in the JVMS and we encounter a loop, it will be
464 // split into multiple Phis, and we cannot change all of them to non-larval when we invoke the
465 // constructor on one of them. The other case is that we don't know whether a parameter of an
466 // OSR compilation is larval or not. If such a maybe-larval object is passed to an operation
467 // that does not permit larval objects, we can be sure that it is not larval, and we scalarize
468 // it if it is a value object.
469 Node* cast_to_non_larval(Node* obj);
470 // Replace all occurrences of one node by another.
471 void replace_in_map(Node* old, Node* neww);
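// Typical strengthening of the parser map after a test (illustrative sketch):
//
//   Node* casted = cast_not_null(obj);   // with do_replace_in_map (the default), every
//                                        // occurrence of obj in the map is rewritten to
//                                        // the not-null cast via replace_in_map()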
472
473 Node* maybe_narrow_object_type(Node* obj, ciKlass* type);
474
475 void push(Node* n) { map_not_null(); _map->set_stack(_map->_jvms, _sp++ , n); }
476 Node* pop() { map_not_null(); return _map->stack( _map->_jvms, --_sp ); }
477 Node* peek(int off = 0) { map_not_null(); return _map->stack( _map->_jvms, _sp - off - 1 ); }
478
479 void push_pair(Node* ldval) {
480 push(ldval);
481 push(top()); // the halfword is merely a placeholder
482 }
483 void push_pair_local(int i) {
484 // longs are stored in locals in "push" order
485 push( local(i+0) ); // the real value
486 assert(local(i+1) == top(), "");
487 push(top()); // halfword placeholder
488 }
489 Node* pop_pair() {
580 // procedure must indicate that the store requires `release'
581 // semantics, if the stored value is an object reference that might
582 // point to a new object and may become externally visible.
583 // Return the new StoreXNode
584 Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
585 MemNode::MemOrd,
586 bool require_atomic_access = false,
587 bool unaligned = false,
588 bool mismatched = false,
589 bool unsafe = false,
590 int barrier_data = 0);
591
592 // Perform decorated accesses
593
594 Node* access_store_at(Node* obj, // containing obj
595 Node* adr, // actual address to store val at
596 const TypePtr* adr_type,
597 Node* val,
598 const Type* val_type,
599 BasicType bt,
600 DecoratorSet decorators,
601 bool safe_for_replace = true,
602 const InlineTypeNode* vt = nullptr);
603
604 Node* access_load_at(Node* obj, // containing obj
605 Node* adr, // actual address to load val at
606 const TypePtr* adr_type,
607 const Type* val_type,
608 BasicType bt,
609 DecoratorSet decorators,
610 Node* ctl = nullptr);
611
612 Node* access_load(Node* adr, // actual address to load val at
613 const Type* val_type,
614 BasicType bt,
615 DecoratorSet decorators);
616
617 Node* access_atomic_cmpxchg_val_at(Node* obj,
618 Node* adr,
619 const TypePtr* adr_type,
620 int alias_idx,
621 Node* expected_val,
622 Node* new_val,
623 const Type* value_type,
624 BasicType bt,
625 DecoratorSet decorators);
626
627 Node* access_atomic_cmpxchg_bool_at(Node* obj,
628 Node* adr,
629 const TypePtr* adr_type,
630 int alias_idx,
643 BasicType bt,
644 DecoratorSet decorators);
645
646 Node* access_atomic_add_at(Node* obj,
647 Node* adr,
648 const TypePtr* adr_type,
649 int alias_idx,
650 Node* new_val,
651 const Type* value_type,
652 BasicType bt,
653 DecoratorSet decorators);
654
655 void access_clone(Node* src, Node* dst, Node* size, bool is_array);
656
657 // Return addressing for an array element.
658 Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
659 // Optional constraint on the array size:
660 const TypeInt* sizetype = nullptr,
661 // Optional control dependency (for example, on range check)
662 Node* ctrl = nullptr);
663 Node* flat_array_element_address(Node*& array, Node* idx, ciInlineKlass* vk, bool is_null_free,
664 bool is_not_null_free, bool is_atomic);
665
666 // Return a load of array element at idx.
667 Node* load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl);
668
669 //---------------- Dtrace support --------------------
670 void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
671 void make_dtrace_method_entry(ciMethod* method) {
672 make_dtrace_method_entry_exit(method, true);
673 }
674 void make_dtrace_method_exit(ciMethod* method) {
675 make_dtrace_method_entry_exit(method, false);
676 }
677
678 //--------------- stub generation -------------------
679 public:
680 void gen_stub(address C_function,
681 const char *name,
682 int is_fancy_jump,
683 bool pass_tls,
684 bool return_pc);
685
686 //---------- help for generating calls --------------
687
688 // Do a null check on the receiver as it would happen before the call to
689 // callee (with all arguments still on the stack).
690 Node* null_check_receiver_before_call(ciMethod* callee) {
691 assert(!callee->is_static(), "must be a virtual method");
692 // The call site signature can differ from that of the actual method being called (e.g., _linkTo* sites).
693 // Always use the call site signature.
694 ciMethod* declared_method = method()->get_method_at_bci(bci());
695 const int nargs = declared_method->arg_size();
696 inc_sp(nargs);
697 Node* n = null_check_receiver();
698 dec_sp(nargs);
699 return n;
700 }
701
702 // Fill in argument edges for the call from argument(0), argument(1), ...
703 // (The next step is to call set_edges_for_java_call.)
704 void set_arguments_for_java_call(CallJavaNode* call, bool is_late_inline = false);
705
706 // Fill in non-argument edges for the call.
707 // Transform the call, and update the basics: control, i_o, memory.
708 // (The next step is usually to call set_results_for_java_call.)
709 void set_edges_for_java_call(CallJavaNode* call,
710 bool must_throw = false, bool separate_io_proj = false);
711
712 // Finish up a java call that was started by set_edges_for_java_call.
713 // Call add_exception on any throw arising from the call.
714 // Return the call result (transformed).
715 Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false, bool deoptimize = false);
716
717 // Similar to set_edges_for_java_call, but simplified for runtime calls.
718 void set_predefined_output_for_runtime_call(Node* call) {
719 set_predefined_output_for_runtime_call(call, nullptr, nullptr);
720 }
721 void set_predefined_output_for_runtime_call(Node* call,
722 Node* keep_mem,
723 const TypePtr* hook_mem);
724 Node* set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem = nullptr);
814 void merge_memory(Node* new_mem, Node* region, int new_path);
815 void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj, bool deoptimize = false);
816
817 // Helper functions to build synchronizations
818 int next_monitor();
819 Node* insert_mem_bar(int opcode, Node* precedent = nullptr);
820 Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = nullptr);
821 // Optional 'precedent' is appended as an extra edge, to force ordering.
822 FastLockNode* shared_lock(Node* obj);
823 void shared_unlock(Node* box, Node* obj);
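// Sketch of a synchronized region built with the two helpers above (illustrative;
// box_node() is assumed to be FastLockNode's accessor for the on-stack lock box):
//
//   FastLockNode* flock = shared_lock(obj);    // monitorenter
//   // ... body of the synchronized region ...
//   shared_unlock(flock->box_node(), obj);     // monitorexit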
824
825 // helper functions for the fast path/slow path idioms
826 Node* fast_and_slow(Node* in, const Type *result_type, Node* null_result, IfNode* fast_test, Node* fast_result, address slow_call, const TypeFunc *slow_call_type, Node* slow_arg, Klass* ex_klass, Node* slow_result);
827
828 // Generate an instance-of idiom. Used by both the instance-of bytecode
829 // and the reflective instance-of call.
830 Node* gen_instanceof(Node *subobj, Node* superkls, bool safe_for_replace = false);
831
832 // Generate a check-cast idiom. Used by both the check-cast bytecode
833 // and the array-store bytecode
834 Node* gen_checkcast(Node *subobj, Node* superkls, Node* *failure_control = nullptr, bool null_free = false, bool maybe_larval = false);
835
836 // Inline types
837 Node* mark_word_test(Node* obj, uintptr_t mask_val, bool eq, bool check_lock = true);
838 Node* inline_type_test(Node* obj, bool is_inline = true);
839 Node* flat_array_test(Node* array_or_klass, bool flat = true);
840 Node* null_free_array_test(Node* array, bool null_free = true);
841 Node* null_free_atomic_array_test(Node* array, ciInlineKlass* vk);
842 Node* inline_array_null_guard(Node* ary, Node* val, int nargs, bool safe_for_replace = false);
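// Sketch of branching on array layout with the tests above (illustrative only; the
// returned nodes are assumed to be boolean tests usable with the If helpers below,
// and the exact semantics of these Valhalla helpers live in graphKit.cpp):
//
//   Node* bol = flat_array_test(ary_klass);              // true iff the array is flat
//   IfNode* iff = create_and_map_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
//   // IfTrue: element is stored field-by-field; IfFalse: ordinary oop element access.
//   Node* checked = inline_array_null_guard(ary, val, nargs);  // guards a null store
//                                                              // into a null-free array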
843
844 Node* gen_subtype_check(Node* obj, Node* superklass);
845
846 // Exact type check used for predicted calls and casts.
847 // Rewrites (*casted_receiver) to be cast to the stronger type.
848 // (Caller is responsible for doing replace_in_map.)
849 Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
850 Node* *casted_receiver);
851 Node* type_check(Node* recv_klass, const TypeKlassPtr* tklass, float prob);
852
853 // Inexact type check used for predicted calls.
854 Node* subtype_check_receiver(Node* receiver, ciKlass* klass,
855 Node** casted_receiver);
856
857 // implementation of object creation
858 Node* set_output_for_allocation(AllocateNode* alloc,
859 const TypeOopPtr* oop_type,
860 bool deoptimize_on_exception=false);
861 Node* get_layout_helper(Node* klass_node, jint& constant_value);
862 Node* new_instance(Node* klass_node,
863 Node* slow_test = nullptr,
864 Node* *return_size_val = nullptr,
865 bool deoptimize_on_exception = false,
866 InlineTypeNode* inline_type_node = nullptr);
867 Node* new_array(Node* klass_node, Node* count_val, int nargs,
868 Node* *return_size_val = nullptr,
869 bool deoptimize_on_exception = false,
870 Node* init_val = nullptr);
871
872 // java.lang.String helpers
873 Node* load_String_length(Node* str, bool set_ctrl);
874 Node* load_String_value(Node* str, bool set_ctrl);
875 Node* load_String_coder(Node* str, bool set_ctrl);
876 void store_String_value(Node* str, Node* value);
877 void store_String_coder(Node* str, Node* value);
878 Node* capture_memory(const TypePtr* src_type, const TypePtr* dst_type);
879 Node* compress_string(Node* src, const TypeAryPtr* src_type, Node* dst, Node* count);
880 void inflate_string(Node* src, Node* dst, const TypeAryPtr* dst_type, Node* count);
881 void inflate_string_slow(Node* src, Node* dst, Node* start, Node* count);
882
883 // Handy for making control flow
884 IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
885 IfNode* iff = new IfNode(ctrl, tst, prob, cnt);  // new IfNode
886 _gvn.set_type(iff, iff->Value(&_gvn)); // Value may be known at parse-time
887 // Place 'if' on worklist if it will be in graph
888 if (!tst->is_Con()) record_for_igvn(iff); // Range-check and Null-check removal is later
889 return iff;
890 }
891
892 IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
893 IfNode* iff = new IfNode(ctrl, tst, prob, cnt);  // new IfNode
894 _gvn.transform(iff); // Value may be known at parse-time
895 // Place 'if' on worklist if it will be in graph
896 if (!tst->is_Con()) record_for_igvn(iff); // Range-check and Null-check removal is later
897 return iff;
898 }
899
900 void add_parse_predicates(int nargs = 0);
901 void add_parse_predicate(Deoptimization::DeoptReason reason, int nargs);
902
903 Node* make_constant_from_field(ciField* field, Node* obj);
904 Node* load_mirror_from_klass(Node* klass);
905
906 // Vector API support (implemented in vectorIntrinsics.cpp)
907 Node* box_vector(Node* in, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem, bool deoptimize_on_exception = false);
908 Node* unbox_vector(Node* in, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem);
909 Node* vector_shift_count(Node* cnt, int shift_op, BasicType bt, int num_elem);
910 };
911
912 // Helper class to support building of control flow branches. Upon
913 // creation, the map and sp at bci are cloned; they are restored upon
914 // destruction. Typical use:
915 //
916 // { PreserveJVMState pjvms(this);
917 // // code of new branch
918 // }
919 // // here the JVM state at bci is established
920
921 class PreserveJVMState: public StackObj {
922 protected:
923 GraphKit* _kit;
924 #ifdef ASSERT