1 /*
  2  * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_OPTO_MULTNODE_HPP
 26 #define SHARE_OPTO_MULTNODE_HPP
 27 
 28 #include "opto/node.hpp"
 29 
 30 class Matcher;
 31 class ProjNode;
 32 
 33 //------------------------------MultiNode--------------------------------------
 34 // This class defines a MultiNode, a Node which produces many values.  The
 35 // values are wrapped up in a tuple Type, i.e. a TypeTuple.
class MultiNode : public Node {
public:
  MultiNode( uint required ) : Node(required) {
    init_class_id(Class_Multi);
  }
  virtual int Opcode() const;
  // Subclasses must report their outputs as a tuple type (TypeTuple).
  virtual const Type *bottom_type() const = 0;
  virtual bool       is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual const RegMask &out_RegMask() const;
  virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  ProjNode* proj_out(uint which_proj) const; // Get a named projection
  ProjNode* proj_out_or_null(uint which_proj) const;
  ProjNode* proj_out_or_null(uint which_proj, bool is_io_use) const;
  // Count projections whose _con (and, for the second overload, _is_io_use) match.
  uint number_of_projs(uint which_proj) const;
  uint number_of_projs(uint which_proj, bool is_io_use) const;

protected:

  // Provide single interface for DUIterator_Fast/DUIterator for template method below
  // NOTE: holds references to the caller's iterators, so 'imax'/'i' must outlive it.
  class UsesIteratorFast {
    DUIterator_Fast& _imax;
    DUIterator_Fast& _i;
    const Node* _node;

  public:
    bool cont() {
      return _i < _imax;
    }
    void next() {
      _i++;
    }
    Node* current() {
      return _node->fast_out(_i);
    }
    UsesIteratorFast(DUIterator_Fast& imax, DUIterator_Fast& i, const Node* node)
      : _imax(imax), _i(i), _node(node) {
    }
  };

  // Same adapter interface, but over the (slower, use-deletion-tolerant) DUIterator.
  class UsesIterator {
    DUIterator& _i;
    const Node* _node;

  public:
    bool cont() {
      return _node->has_out(_i);
    }
    void next() {
      _i++;
    }
    Node* current() {
      return _node->out(_i);
    }
    UsesIterator(DUIterator& i, const Node* node)
      : _i(i), _node(node) {
    }
  };

  // Iterate with i over all Proj uses calling callback.
  // Returns the projection on which the callback asked to break, or null if the
  // iteration ran to completion. A non-Proj use is only tolerated when it is a
  // Start node referring to itself (see the assert below).
  template<class Callback, class Iterator> ProjNode* apply_to_projs_any_iterator(Iterator i, Callback callback) const {
    for (; i.cont(); i.next()) {
      Node* p = i.current();
      if (p->is_Proj()) {
        ProjNode* proj = p->as_Proj();
        ApplyToProjs result = callback(proj);
        if (result == BREAK_AND_RETURN_CURRENT_PROJ) {
          return proj;
        }
        assert(result == CONTINUE, "should be either break or continue");
      } else {
        assert(p == this && is_Start(), "else must be proj");
      }
    }
    return nullptr;
  }
  // Result protocol for the callbacks above: keep iterating, or stop and
  // return the projection currently being visited.
  // (Declared after its use above; this is legal inside a class body.)
  enum ApplyToProjs {
    CONTINUE,
    BREAK_AND_RETURN_CURRENT_PROJ
  };

  // Run callback on projections with iterator passed as argument
  template <class Callback> ProjNode* apply_to_projs(DUIterator_Fast& imax, DUIterator_Fast& i, Callback callback, uint which_proj) const;

  // Same but with default iterator and for matching _con
  template<class Callback> ProjNode* apply_to_projs(Callback callback, uint which_proj) const {
    DUIterator_Fast imax, i = fast_outs(imax);
    return apply_to_projs(imax, i, callback, which_proj);
  }

  // Same but for matching _con and _is_io_use
  template <class Callback> ProjNode* apply_to_projs(Callback callback, uint which_proj, bool is_io_use) const;

public:
  // Visit every projection whose _con matches; 'callback' returns void and
  // cannot abort the iteration (use find_first for early exit).
  template<class Callback> void for_each_proj(Callback callback, uint which_proj) const {
    auto callback_always_continue = [&](ProjNode* proj) {
      callback(proj);
      return MultiNode::CONTINUE;
    };
    apply_to_projs(callback_always_continue, which_proj);
  }

  // As above, but also requires _is_io_use to match.
  template <class Callback> void for_each_proj(Callback callback, uint which_proj, bool is_io_use) const {
    auto callback_always_continue = [&](ProjNode* proj) {
      callback(proj);
      return MultiNode::CONTINUE;
    };
    apply_to_projs(callback_always_continue, which_proj, is_io_use);
  }


  // Return the first matching projection found, or null (definitions out of line).
  ProjNode* find_first(uint which_proj) const;
  ProjNode* find_first(uint which_proj, bool is_io_use) const;
};
151 
152 //------------------------------ProjNode---------------------------------------
153 // This class defines a Projection node.  Projections project a single element
154 // out of a tuple (or Signature) type.  Only MultiNodes produce TypeTuple
155 // results.
class ProjNode : public Node {
protected:
  // GVN support: hash/equality must account for _con and _is_io_use so distinct
  // projections of the same MultiNode are not value-numbered together.
  // (Implemented out of line.)
  virtual uint hash() const;
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const;
  void check_con() const;       // Called from constructor.
  // Extract this projection's slice out of the tuple type 't'.
  const Type* proj_type(const Type* t) const;

public:
  // 'src' is the MultiNode being projected from; 'con' selects the tuple field.
  ProjNode( Node *src, uint con, bool io_use = false )
    : Node( src ), _con(con), _is_io_use(io_use)
  {
    init_class_id(Class_Proj);
    // Optimistic setting. Need additional checks in Node::is_dead_loop_safe().
    if (con != TypeFunc::Memory || src->is_Start())
      init_flags(Flag_is_dead_loop_safe);
    DEBUG_ONLY(check_con());
  }
  const uint _con;              // The field in the tuple we are projecting
  const bool _is_io_use;        // Used to distinguish between the projections
                                // used on the control and io paths from a macro node
  virtual int Opcode() const;
  virtual bool is_CFG() const;
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const;
  virtual bool pinned() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint ideal_reg() const;
  virtual const RegMask &out_RegMask() const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif

  // Return uncommon trap call node if proj is for "proj->[region->..]call_uct"
  // null otherwise
  CallStaticJavaNode* is_uncommon_trap_proj(Deoptimization::DeoptReason reason = Deoptimization::Reason_none) const;
  // Return uncommon trap call node for    "if(test)-> proj -> ...
  //                                                 |
  //                                                 V
  //                                             other_proj->[region->..]call_uct"
  // null otherwise
  CallStaticJavaNode* is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason = Deoptimization::Reason_none) const;
};
202 
203 // A ProjNode variant that captures an adr_type(). Used as a projection of InitializeNode to have the right adr_type()
204 // for array elements/fields.
class NarrowMemProjNode : public ProjNode {
private:
  const TypePtr* const _adr_type;
protected:
  // Fold _adr_type into the GVN hash so projections over different memory
  // slices are kept distinct.
  virtual uint hash() const {
    return ProjNode::hash() + _adr_type->hash();
  }
  // Equality additionally requires an identical _adr_type (pointer compare;
  // type instances are assumed canonicalized — TODO confirm against callers).
  virtual bool cmp(const Node& n) const {
    return ProjNode::cmp(n) && ((NarrowMemProjNode&)n)._adr_type == _adr_type;
  }
  virtual uint size_of() const {
    return sizeof(*this);
  }
public:
  NarrowMemProjNode(InitializeNode* src, const TypePtr* adr_type);

  // Report the captured slice instead of the generic ProjNode::adr_type().
  virtual const TypePtr* adr_type() const {
    return _adr_type;
  }

  virtual int Opcode() const;
};
227 
228 template <class Callback> ProjNode* MultiNode::apply_to_projs(DUIterator_Fast& imax, DUIterator_Fast& i, Callback callback, uint which_proj) const {
229   auto filter = [&](ProjNode* proj) {
230     if (proj->_con == which_proj && callback(proj) == BREAK_AND_RETURN_CURRENT_PROJ) {
231       return BREAK_AND_RETURN_CURRENT_PROJ;
232     }
233     return CONTINUE;
234   };
235   return apply_to_projs_any_iterator(UsesIteratorFast(imax, i, this), filter);
236 }
237 
/* Tuples are used to avoid manual graph surgery. When a node with Proj outputs (such as a call)
 * must be removed and its outputs replaced by its input, or some other value, we can make its
 * ::Ideal return a tuple of what we want for each output: the ::Identity of each output Proj will
 * take care to jump over the Tuple and directly pick up the right input of the Tuple.
 *
 * For instance, if a function call is proven to have no side effect and return the constant 0,
 * we can replace it with the 6-tuple:
 * (control input, IO input, memory input, frame ptr input, return addr input, Con:0)
 * all the output projections will pick up the input of the now gone call, except for the result
 * projection that is replaced by 0.
 *
 * Using TupleNode avoids manual graph surgery and leaves that to our expert surgeon: IGVN.
 * Since the users of a Tuple are expected to be Projs, when creating a tuple during idealization,
 * the output Projs should be enqueued for IGVN immediately after, and the tuple should not survive
 * after the current IGVN.
 */
254 class TupleNode : public MultiNode {
255   const TypeTuple* _tf;
256 
257   template <typename... NN>
258   static void make_helper(TupleNode* tn, uint i, Node* node, NN... nn) {
259     tn->set_req(i, node);
260     make_helper(tn, i + 1, nn...);
261   }
262 
263   static void make_helper(TupleNode*, uint) {}
264 
265 public:
266   TupleNode(const TypeTuple* tf) : MultiNode(tf->cnt()), _tf(tf) {}
267 
268   int Opcode() const override;
269   const Type* bottom_type() const override { return _tf; }
270 
271   /* Give as many `Node*` as you want in the `nn` pack:
272    * TupleNode::make(tf, input1)
273    * TupleNode::make(tf, input1, input2, input3, input4)
274    */
275   template <typename... NN>
276   static TupleNode* make(const TypeTuple* tf, NN... nn) {
277     TupleNode* tn = new TupleNode(tf);
278     make_helper(tn, 0, nn...);
279     return tn;
280   }
281 };
282 
283 #endif // SHARE_OPTO_MULTNODE_HPP