  int _inst_mem_id;       // Instance memory id (node index of the memory Phi)
  int _inst_id;           // Instance id of the memory slice.
  const int _inst_index;  // Alias index of the instance memory slice.
  // Array element references have the same alias_idx but different offsets.
  const int _inst_offset; // Offset of the instance memory slice.
  // Size is bigger to hold the _adr_type field.
  virtual uint hash() const; // Check the type
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const { return sizeof(*this); }

  // Determine if CMoveNode::is_cmove_id can be used at this join point.
  Node* is_cmove_id(PhaseTransform* phase, int true_path);
  bool wait_for_region_igvn(PhaseGVN* phase);
  bool is_data_loop(RegionNode* r, Node* uin, const PhaseGVN* phase);

  static Node* clone_through_phi(Node* root_phi, const Type* t, uint c, PhaseIterGVN* igvn);
  static Node* merge_through_phi(Node* root_phi, PhaseIterGVN* igvn);

  bool must_wait_for_region_in_irreducible_loop(PhaseGVN* phase) const;

  bool can_push_inline_types_down(PhaseGVN* phase, bool can_reshape, ciInlineKlass*& inline_klass);
  InlineTypeNode* push_inline_types_down(PhaseGVN* phase, bool can_reshape, ciInlineKlass* inline_klass);

  bool is_split_through_mergemem_terminating() const;

public:
  // Node layout (parallels RegionNode):
  enum { Region, // Control input is the Phi's region.
         Input   // Input values are [1..len)
  };
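
  // Illustrative sketch (not part of this class): a Phi's data inputs pair
  // up 1:1 with its region's control inputs, so a typical walk over the
  // merged values looks like this ('phi' and 'visit' are hypothetical):
  //
  //   RegionNode* r = phi->region();
  //   for (uint i = 1; i < phi->req(); i++) {
  //     Node* val = phi->in(i); // value flowing in along the path r->in(i)
  //     Node* ctl = r->in(i);   // the matching control input
  //     visit(ctl, val);
  //   }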

  PhiNode( Node *r, const Type *t, const TypePtr* at = nullptr,
           const int imid = -1,
           const int iid = TypeOopPtr::InstanceTop,
           const int iidx = Compile::AliasIdxTop,
           const int ioffs = Type::OffsetTop )
    : TypeNode(t,r->req()),
      _adr_type(at),
      _inst_mem_id(imid),
      _inst_id(iid),
      _inst_index(iidx),
      _inst_offset(ioffs)
  {
    init_class_id(Class_Phi);
    verify_adr_type();
  }

  PhiNode* slice_memory(const TypePtr* adr_type) const;
  PhiNode* split_out_instance(const TypePtr* at, PhaseIterGVN *igvn) const;
  // like make(r, x), but does not initialize the in edges to x
  static PhiNode* make_blank( Node* r, Node* x );

  // Accessors
  RegionNode* region() const { Node* r = in(Region); assert(!r || r->is_Region(), ""); return (RegionNode*)r; }

  bool is_tripcount(BasicType bt) const;

  // Determine a unique non-trivial input, if any.
  // Ignore casts if it helps. Return null on failure.
  Node* unique_input(PhaseValues* phase, bool uncast);
  Node* unique_input(PhaseValues* phase) {
    Node* uin = unique_input(phase, false);
    if (uin == nullptr) {
      uin = unique_input(phase, true);
    }
    return uin;
  }
  Node* unique_input_recursive(PhaseGVN* phase);
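
  // Usage sketch (hypothetical caller): during Identity, a phi whose live
  // inputs all reduce to one node can collapse to that node:
  //
  //   Node* uin = phi->unique_input(phase);
  //   if (uin != nullptr) {
  //     return uin; // replace the phi by its unique input
  //   }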

  // Check for a simple dead loop.
  enum LoopSafety { Safe = 0, Unsafe, UnsafeLoop };
  LoopSafety simple_data_loop_check(Node *in) const;
  // Is it an unsafe data loop? It becomes a dead loop if this phi node is removed.
  bool is_unsafe_data_reference(Node *in) const;
  int is_diamond_phi() const;
  bool try_clean_memory_phi(PhaseIterGVN* igvn);
  virtual int Opcode() const;
  virtual bool pinned() const { return in(0) != nullptr; }
  virtual const TypePtr *adr_type() const { verify_adr_type(true); return _adr_type; }

  void set_inst_mem_id(int inst_mem_id) { _inst_mem_id = inst_mem_id; }
  int inst_mem_id() const { return _inst_mem_id; }
  int inst_id()     const { return _inst_id; }
  int inst_index()  const { return _inst_index; }
  int inst_offset() const { return _inst_offset; }
  bool is_same_inst_field(const Type* tp, int mem_id, int id, int index, int offset) {
    return type()->basic_type() == tp->basic_type() &&
           inst_mem_id() == mem_id &&
           inst_id()     == id     &&
           inst_index()  == index  &&
           inst_offset() == offset &&
           type()->higher_equal(tp);
  }
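
  // Sketch of intended use (names illustrative): when memory phis are split
  // per allocation instance, an existing phi for the same instance field can
  // be reused instead of cloning a new one:
  //
  //   if (other->is_same_inst_field(type(), mem_id, id, idx, off)) {
  //     return other; // an equivalent per-instance phi already exists
  //   }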

  bool can_be_inline_type() const {
    return EnableValhalla && _type->isa_instptr() && _type->is_instptr()->can_be_inline_type();
  }

  Node* try_push_inline_types_down(PhaseGVN* phase, bool can_reshape);
  DEBUG_ONLY(bool can_push_inline_types_down(PhaseGVN* phase);)

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const RegMask &out_RegMask() const;
  virtual const RegMask &in_RegMask(uint) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  void verify_adr_type(VectorSet& visited, const TypePtr* at) const;
  void verify_adr_type(bool recursive = false) const;
#else  // ASSERT
  void verify_adr_type(bool recursive = false) const {}
#endif // ASSERT

  const TypeTuple* collect_types(PhaseGVN* phase) const;
};
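
// A minimal construction sketch (illustrative, not from this file): merge two
// int values 'a' and 'b' arriving along the two control paths of 'region':
//
//   PhiNode* phi = new PhiNode(region, TypeInt::INT);
//   phi->init_req(1, a);               // value for the path region->in(1)
//   phi->init_req(2, b);               // value for the path region->in(2)
//   Node* merged = gvn.transform(phi); // let GVN fold or value-number it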

//------------------------------GotoNode---------------------------------------
// GotoNodes perform direct branches.
  static IfNode* make_with_same_profile(IfNode* if_node_profile, Node* ctrl, Node* bol);

  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *bottom_type() const { return TypeTuple::IFBOTH; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int required_outcnt() const { return 2; }
  virtual const RegMask &out_RegMask() const;
  Node* fold_compares(PhaseIterGVN* phase);
  static Node* up_one_dom(Node* curr, bool linear_only = false);
  bool is_zero_trip_guard() const;
  Node* dominated_by(Node* prev_dom, PhaseIterGVN* igvn, bool pin_array_access_nodes);
  ProjNode* uncommon_trap_proj(CallStaticJavaNode*& call, Deoptimization::DeoptReason reason = Deoptimization::Reason_none) const;
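
  // Usage sketch (illustrative): test whether one projection of this If
  // leads straight to an uncommon trap, and retrieve the trap call:
  //
  //   CallStaticJavaNode* unc = nullptr;
  //   ProjNode* proj = iff->uncommon_trap_proj(unc);
  //   if (proj != nullptr) {
  //     // 'proj' is the trap path and 'unc' the uncommon trap call
  //   }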

  // Takes the type of val and filters it through the test represented
  // by if_proj and returns a more refined type if one is produced.
  // Returns null if it couldn't improve the type.
  static const TypeInt* filtered_int_type(PhaseGVN* phase, Node* val, Node* if_proj);
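
  // Example (sketch, names hypothetical): for 'if (i < 10)', the true
  // projection refines i's type to [min_jint..9]:
  //
  //   const TypeInt* ti = filtered_int_type(phase, i, true_proj);
  //   if (ti != nullptr) {
  //     // 'ti' is the sharper type of 'i' along this path
  //   }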

  bool is_flat_array_check(PhaseTransform* phase, Node** array = nullptr);

  AssertionPredicateType assertion_predicate_type() const {
    return _assertion_predicate_type;
  }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif

  bool same_condition(const Node* dom, PhaseIterGVN* igvn) const;
};

class RangeCheckNode : public IfNode {
private:
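  // Hedged note: a C2 range check canonically has the shape
  //   (index + offset) u< range
  // i.e. a single unsigned comparison (CmpU) covering both 'index >= 0' and
  // 'index < range'. is_range_check() recognizes this shape and, on success,
  // fills in the 'range', 'index' and 'offset' out-parameters.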
  int is_range_check(Node*& range, Node*& index, jint& offset);

public:
  RangeCheckNode(Node* control, Node* bol, float p, float fcnt) : IfNode(control, bol, p, fcnt) {
    init_class_id(Class_RangeCheck);
  }

  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *bottom_type() const { return TypeTuple::IFBOTH; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int required_outcnt() const { return 2; }
  virtual void emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const { }
  virtual uint size(PhaseRegAlloc *ra_) const { return 0; }
#ifndef PRODUCT
  virtual void format( PhaseRegAlloc *, outputStream *st ) const;
#endif
};

//------------------------------BlackholeNode----------------------------
// Blackhole all arguments. This node survives through the compiler so that
// the effects on its arguments are kept alive (not dead-code eliminated),
// and it is finally matched to nothing.
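// Hedged note (the CompileCommand is real, the method name is illustrative):
// a method can be registered as a compiler blackhole via
//   -XX:CompileCommand=blackhole,my/pkg/Sink.consume
// after which C2 replaces calls to it with a BlackholeNode consuming the
// arguments.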
class BlackholeNode : public MultiNode {
public:
  BlackholeNode(Node* ctrl) : MultiNode(1) {
    init_req(TypeFunc::Control, ctrl);
    init_class_id(Class_Blackhole);
  }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const Type* bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);

  const RegMask &in_RegMask(uint idx) const {
    // Fake the incoming arguments mask for blackholes: accept all registers
    // and all stack slots. This avoids redundant register moves for
    // blackhole inputs.
    return RegMask::All;
  }
#ifndef PRODUCT
  virtual void format(PhaseRegAlloc* ra, outputStream* st) const;
#endif
};


#endif // SHARE_OPTO_CFGNODE_HPP