 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_CFGNODE_HPP
#define SHARE_OPTO_CFGNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/predicates_enums.hpp"
#include "opto/type.hpp"
#include "runtime/arguments.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Matcher;
class Node;
class RegionNode;
class TypeNode;
class PhiNode;
class GotoNode;
class MultiNode;
class MultiBranchNode;
class IfNode;
class PCTableNode;
class JumpNode;
class CatchNode;
class NeverBranchNode;
class BlackholeNode;
class ProjNode;
// ... (RegionNode and the first part of PhiNode elided) ...

  int _inst_mem_id;       // Instance memory id (node index of the memory Phi)
  int _inst_id;           // Instance id of the memory slice.
  const int _inst_index;  // Alias index of the instance memory slice.
  // Array element references have the same alias_idx but different offsets.
  const int _inst_offset; // Offset of the instance memory slice.
  // Size is bigger to hold the _adr_type field.
  virtual uint hash() const; // Check the type
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const { return sizeof(*this); }

  // Determine if CMoveNode::is_cmove_id can be used at this join point.
  Node* is_cmove_id(PhaseTransform* phase, int true_path);
  bool wait_for_region_igvn(PhaseGVN* phase);
  bool is_data_loop(RegionNode* r, Node* uin, const PhaseGVN* phase);

  static Node* clone_through_phi(Node* root_phi, const Type* t, uint c, PhaseIterGVN* igvn);
  static Node* merge_through_phi(Node* root_phi, PhaseIterGVN* igvn);

  bool must_wait_for_region_in_irreducible_loop(PhaseGVN* phase) const;

  bool can_push_inline_types_down(PhaseGVN* phase, bool can_reshape, ciInlineKlass*& inline_klass);
  InlineTypeNode* push_inline_types_down(PhaseGVN* phase, bool can_reshape, ciInlineKlass* inline_klass);

  bool is_split_through_mergemem_terminating() const;

  void verify_type_stability(const PhaseGVN* phase, const Type* union_of_input_types, const Type* new_type) const NOT_DEBUG_RETURN;
  bool wait_for_cast_input_igvn(const PhaseIterGVN* igvn) const;

public:
  // Node layout (parallels RegionNode):
  enum { Region, // Control input is the Phi's region.
         Input   // Input values are [1..len)
  };
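
  // Illustrative sketch (not part of the original header): a Phi's data
  // inputs line up index-for-index with its Region's control inputs, so for
  //   r = Region(ctrl_a, ctrl_b)   // r->in(1), r->in(2)
  //   p = Phi(r, x, y)             // p->in(0) == r, p->in(1) == x, p->in(2) == y
  // the Phi produces x when control reaches r via ctrl_a and y via ctrl_b.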

  PhiNode( Node *r, const Type *t, const TypePtr* at = nullptr,
           const int imid = -1,
           const int iid = TypeOopPtr::InstanceTop,
           const int iidx = Compile::AliasIdxTop,
           const int ioffs = Type::OffsetTop )
    : TypeNode(t,r->req()),
      _adr_type(at),
      _inst_mem_id(imid),
      _inst_id(iid),
      // ... (remainder of the initializer list and several members elided) ...

  PhiNode* slice_memory(const TypePtr* adr_type) const;
  PhiNode* split_out_instance(const TypePtr* at, PhaseIterGVN *igvn) const;
  // like make(r, x), but does not initialize the in edges to x
  static PhiNode* make_blank( Node* r, Node* x );

  // Accessors
  RegionNode* region() const { Node* r = in(Region); assert(!r || r->is_Region(), ""); return (RegionNode*)r; }

  bool is_tripcount(BasicType bt) const;

  // Determine a unique non-trivial input, if any.
  // Ignore casts if it helps. Return null on failure.
  Node* unique_input(PhaseValues* phase, bool uncast);
  Node* unique_input(PhaseValues* phase) {
    Node* uin = unique_input(phase, false);
    if (uin == nullptr) {
      uin = unique_input(phase, true);
    }
    return uin;
  }
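
  // Illustrative sketch (assumed example, not from this file): for
  //   p = Phi(r, x, CastPP(x))
  // the strict pass unique_input(phase, false) fails because the inputs
  // differ as nodes, while unique_input(phase, true) looks through the cast
  // and returns x; the no-argument overload above tries strict first, then
  // uncast, exactly in that order.
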
  Node* unique_constant_input_recursive(PhaseGVN* phase);

  // Check for a simple dead loop.
  enum LoopSafety { Safe = 0, Unsafe, UnsafeLoop };
  LoopSafety simple_data_loop_check(Node *in) const;
  // Is this an unsafe data loop? It becomes a dead loop if this phi node is removed.
  bool is_unsafe_data_reference(Node *in) const;
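  // Illustrative sketch (assumed example, not from this file): a simple dead
  // data loop is a value cycle with no consumer outside itself, e.g.
  //   p   = Phi(r, init, add)
  //   add = AddI(p, 1)
  // where add's only use is p; removing p (or its region) would leave the
  // cycle computing nothing for anyone, i.e. a dead loop.
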
  int is_diamond_phi() const;
  bool try_clean_memory_phi(PhaseIterGVN* igvn);
  virtual int Opcode() const;
  virtual bool pinned() const { return in(0) != nullptr; }
  virtual const TypePtr *adr_type() const { verify_adr_type(true); return _adr_type; }

  void set_inst_mem_id(int inst_mem_id) { _inst_mem_id = inst_mem_id; }
  int inst_mem_id() const { return _inst_mem_id; }
  int inst_id() const { return _inst_id; }
  int inst_index() const { return _inst_index; }
  int inst_offset() const { return _inst_offset; }
  bool is_same_inst_field(const Type* tp, int mem_id, int id, int index, int offset) {
    return type()->basic_type() == tp->basic_type() &&
           inst_mem_id() == mem_id &&
           inst_id() == id &&
           inst_index() == index &&
           inst_offset() == offset &&
           type()->higher_equal(tp);
  }

  bool can_be_inline_type() const {
    return Arguments::is_valhalla_enabled() && _type->isa_instptr() && _type->is_instptr()->can_be_inline_type();
  }

  Node* try_push_inline_types_down(PhaseGVN* phase, bool can_reshape);
  DEBUG_ONLY(bool can_push_inline_types_down(PhaseGVN* phase);)

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const RegMask &out_RegMask() const;
  virtual const RegMask &in_RegMask(uint) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  void verify_adr_type(VectorSet& visited, const TypePtr* at) const;
  void verify_adr_type(bool recursive = false) const;
#else //ASSERT
  void verify_adr_type(bool recursive = false) const {}
#endif //ASSERT

  const TypeTuple* collect_types(PhaseGVN* phase) const;
  bool can_be_replaced_by(const PhiNode* other) const;
};

//------------------------------GotoNode---------------------------------------
// ... (GotoNode through the first part of IfNode elided) ...
  }

  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *bottom_type() const { return TypeTuple::IFBOTH; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint required_outcnt() const { return 2; }
  virtual const RegMask &out_RegMask() const;
  Node* fold_compares(PhaseIterGVN* phase);
  static Node* up_one_dom(Node* curr, bool linear_only = false);
  bool is_zero_trip_guard() const;
  Node* dominated_by(Node* prev_dom, PhaseIterGVN* igvn, bool pin_array_access_nodes);
  ProjNode* uncommon_trap_proj(CallStaticJavaNode*& call, Deoptimization::DeoptReason reason = Deoptimization::Reason_none) const;

  // Takes the type of val and filters it through the test represented
  // by if_proj and returns a more refined type if one is produced.
  // Returns null if it couldn't improve the type.
  static const TypeInt* filtered_int_type(PhaseGVN* phase, Node* val, Node* if_proj);
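  // Illustrative sketch (assumed example, not from this file): for
  //   if (i < 10) { ... }
  // filtering val == i through the true projection can refine its type to
  // int:<=9, and through the false projection to int:>=10; downstream
  // transforms such as range-check elimination consume these refined types.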

  bool is_flat_array_check(PhaseTransform* phase, Node** array = nullptr);

  AssertionPredicateType assertion_predicate_type() const {
    return _assertion_predicate_type;
  }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif

  bool same_condition(const Node* dom, PhaseIterGVN* igvn) const;
};

class RangeCheckNode : public IfNode {
private:
  int is_range_check(Node*& range, Node*& index, jint& offset);

public:
  RangeCheckNode(Node* control, Node* bol, float p, float fcnt) : IfNode(control, bol, p, fcnt) {
    init_class_id(Class_RangeCheck);
  }

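  // Illustrative sketch (assumed example, not from this file): a Java array
  // access a[i] parses into this node guarded by one unsigned comparison,
  //   RangeCheck(ctrl, Bool(CmpU(i, LoadRange(a))), prob, cnt)
  // so a single test covers both i >= 0 and i < a.length.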
// ... (the rest of RangeCheckNode and several intervening node classes elided) ...
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *bottom_type() const { return TypeTuple::IFBOTH; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint required_outcnt() const { return 2; }
  virtual void emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const { }
  virtual uint size(PhaseRegAlloc *ra_) const { return 0; }
#ifndef PRODUCT
  virtual void format( PhaseRegAlloc *, outputStream *st ) const;
#endif
};

//------------------------------BlackholeNode----------------------------
// Blackhole all arguments. This node would survive through the compiler
// in order to preserve the effects on its arguments, and would be finally
// matched to nothing.
class BlackholeNode : public MultiNode {
public:
  BlackholeNode(Node* ctrl) : MultiNode(1) {
    init_req(TypeFunc::Control, ctrl);
    init_class_id(Class_Blackhole);
  }
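  // Illustrative note (assumption, not from this file): this is the node
  // behind compiler blackholes, e.g. what a JMH Blackhole.consume() call
  // compiles to when the VM's CompileCommand "blackhole" directive applies;
  // the consumed values stay live up to this point, yet the node itself
  // emits no machine code.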
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const Type* bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);

  const RegMask &in_RegMask(uint idx) const {
    // Fake the incoming arguments mask for blackholes: accept all registers
    // and all stack slots. This would avoid any redundant register moves
    // for blackhole inputs.
    return RegMask::ALL;
  }
#ifndef PRODUCT
  virtual void format(PhaseRegAlloc* ra, outputStream* st) const;
#endif
};


#endif // SHARE_OPTO_CFGNODE_HPP