1 /*
  2  * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_OPTO_MULTNODE_HPP
 26 #define SHARE_OPTO_MULTNODE_HPP
 27 
 28 #include "opto/node.hpp"
 29 
 30 class Matcher;
 31 class ProjNode;
 32 
 33 //------------------------------MultiNode--------------------------------------
 34 // This class defines a MultiNode, a Node which produces many values.  The
 35 // values are wrapped up in a tuple Type, i.e. a TypeTuple.
class MultiNode : public Node {
public:
  // A MultiNode produces several values at once, wrapped in a TypeTuple;
  // consumers read individual tuple fields through ProjNode outputs.
  MultiNode( uint required ) : Node(required) {
    init_class_id(Class_Multi);
  }
  virtual int Opcode() const;
  // Subclasses must describe all produced values as a TypeTuple.
  virtual const Type *bottom_type() const = 0;
  virtual bool       is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual const RegMask &out_RegMask() const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // A MultiNode itself occupies no machine register; only its projections do.
  virtual uint ideal_reg() const { return NotAMachineReg; }
  ProjNode* proj_out(uint which_proj) const; // Get a named projection
  // Like proj_out() but returns null instead of asserting when absent.
  ProjNode* proj_out_or_null(uint which_proj) const;
  ProjNode* proj_out_or_null(uint which_proj, bool is_io_use) const;
  // Count output projections with the given _con (and optionally _is_io_use).
  uint number_of_projs(uint which_proj) const;
  uint number_of_projs(uint which_proj, bool is_io_use) const;

protected:

  // Provide single interface for DUIterator_Fast/DUIterator for template method below
  class UsesIteratorFast {
    DUIterator_Fast& _imax;
    DUIterator_Fast& _i;
    const Node* _node;

  public:
    // True while there are more outputs to visit.
    bool cont() {
      return _i < _imax;
    }
    void next() {
      _i++;
    }
    // Current output node of _node under iteration.
    Node* current() {
      return _node->fast_out(_i);
    }
    // References the caller-owned iterators so the caller sees the final position.
    UsesIteratorFast(DUIterator_Fast& imax, DUIterator_Fast& i, const Node* node)
      : _imax(imax), _i(i), _node(node) {
    }
  };

  // Same interface as UsesIteratorFast, but over the checked DUIterator,
  // which tolerates edge deletions during iteration.
  class UsesIterator {
    DUIterator& _i;
    const Node* _node;

  public:
    bool cont() {
      return _node->has_out(_i);
    }
    void next() {
      _i++;
    }
    Node* current() {
      return _node->out(_i);
    }
    UsesIterator(DUIterator& i, const Node* node)
      : _i(i), _node(node) {
    }
  };

  // Iterate with i over all Proj uses calling callback
  // Returns the projection for which the callback requested a break, or null
  // if iteration ran to completion.
  template<class Callback, class Iterator> ProjNode* apply_to_projs_any_iterator(Iterator i, Callback callback) const {
    for (; i.cont(); i.next()) {
      Node* p = i.current();
      if (p->is_Proj()) {
        ProjNode* proj = p->as_Proj();
        ApplyToProjs result = callback(proj);
        if (result == BREAK_AND_RETURN_CURRENT_PROJ) {
          return proj;
        }
        assert(result == CONTINUE, "should be either break or continue");
      } else {
        // Only a StartNode may legitimately have a non-Proj output: itself.
        assert(p == this && is_Start(), "else must be proj");
      }
    }
    return nullptr;
  }
  // Control value a callback returns to continue or stop the projection walk.
  enum ApplyToProjs {
    CONTINUE,
    BREAK_AND_RETURN_CURRENT_PROJ
  };

  // Run callback on projections with iterator passed as argument
  template <class Callback> ProjNode* apply_to_projs(DUIterator_Fast& imax, DUIterator_Fast& i, Callback callback, uint which_proj) const;

  // Same but with default iterator and for matching _con
  template<class Callback> ProjNode* apply_to_projs(Callback callback, uint which_proj) const {
    DUIterator_Fast imax, i = fast_outs(imax);
    return apply_to_projs(imax, i, callback, which_proj);
  }

  // Same but for matching _con and _is_io_use
  template <class Callback> ProjNode* apply_to_projs(Callback callback, uint which_proj, bool is_io_use) const;

public:
  // Visit every projection matching which_proj; the callback returns void and
  // the walk always runs to completion.
  template<class Callback> void for_each_proj(Callback callback, uint which_proj) const {
    auto callback_always_continue = [&](ProjNode* proj) {
      callback(proj);
      return MultiNode::CONTINUE;
    };
    apply_to_projs(callback_always_continue, which_proj);
  }

  // Same, additionally filtering on the projection's _is_io_use flag.
  template <class Callback> void for_each_proj(Callback callback, uint which_proj, bool is_io_use) const {
    auto callback_always_continue = [&](ProjNode* proj) {
      callback(proj);
      return MultiNode::CONTINUE;
    };
    apply_to_projs(callback_always_continue, which_proj, is_io_use);
  }


  // First projection matching which_proj (and optionally is_io_use), or null.
  ProjNode* find_first(uint which_proj) const;
  ProjNode* find_first(uint which_proj, bool is_io_use) const;
};
152 
153 //------------------------------ProjNode---------------------------------------
154 // This class defines a Projection node.  Projections project a single element
155 // out of a tuple (or Signature) type.  Only MultiNodes produce TypeTuple
156 // results.
class ProjNode : public Node {
protected:
  virtual uint hash() const;
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const;
  void check_con() const;       // Called from constructor.
  // Type of the projected field; presumably selects slot _con of tuple t --
  // see the implementation in multnode.cpp.
  const Type* proj_type(const Type* t) const;

public:
  // src is the MultiNode being projected; con names the tuple field.
  ProjNode( Node *src, uint con, bool io_use = false )
    : Node( src ), _con(con), _is_io_use(io_use)
  {
    init_class_id(Class_Proj);
    // Optimistic setting. Need additional checks in Node::is_dead_loop_safe().
    if (con != TypeFunc::Memory || src->is_Start())
      init_flags(Flag_is_dead_loop_safe);
    DEBUG_ONLY(check_con());
  }
  const uint _con;              // The field in the tuple we are projecting
  const bool _is_io_use;        // Used to distinguish between the projections
                                // used on the control and io paths from a macro node
  virtual int Opcode() const;
  virtual bool      is_CFG() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const;
  virtual bool pinned() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint ideal_reg() const;
  virtual const RegMask &out_RegMask() const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif

  // Return uncommon trap call node if proj is for "proj->[region->..]call_uct"
  // null otherwise
  CallStaticJavaNode* is_uncommon_trap_proj(Deoptimization::DeoptReason reason = Deoptimization::Reason_none) const;
  // Return uncommon trap call node for    "if(test)-> proj -> ...
  //                                                 |
  //                                                 V
  //                                             other_proj->[region->..]call_uct"
  // null otherwise
  CallStaticJavaNode* is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason = Deoptimization::Reason_none) const;

  // Return other proj node when this is a If proj node
  ProjNode* other_if_proj() const;
};
207 
208 // A ProjNode variant that captures an adr_type(). Used as a projection of InitializeNode to have the right adr_type()
209 // for array elements/fields.
210 class NarrowMemProjNode : public ProjNode {
211 private:
212   const TypePtr* const _adr_type;
213 protected:
214   virtual uint hash() const {
215     return ProjNode::hash() + _adr_type->hash();
216   }
217   virtual bool cmp(const Node& n) const {
218     return ProjNode::cmp(n) && ((NarrowMemProjNode&)n)._adr_type == _adr_type;
219   }
220   virtual uint size_of() const {
221     return sizeof(*this);
222   }
223 public:
224   NarrowMemProjNode(InitializeNode* src, const TypePtr* adr_type);
225 
226   virtual const TypePtr* adr_type() const {
227     return _adr_type;
228   }
229 
230   virtual int Opcode() const;
231 };
232 
233 template <class Callback> ProjNode* MultiNode::apply_to_projs(DUIterator_Fast& imax, DUIterator_Fast& i, Callback callback, uint which_proj) const {
234   auto filter = [&](ProjNode* proj) {
235     if (proj->_con == which_proj && callback(proj) == BREAK_AND_RETURN_CURRENT_PROJ) {
236       return BREAK_AND_RETURN_CURRENT_PROJ;
237     }
238     return CONTINUE;
239   };
240   return apply_to_projs_any_iterator(UsesIteratorFast(imax, i, this), filter);
241 }
242 
/* Tuples are used to avoid manual graph surgery. When a node with Proj outputs (such as a call)
 * must be removed and its outputs replaced by its input, or some other value, we can make its
 * ::Ideal return a tuple of what we want for each output: the ::Identity of each output Proj will
 * take care to jump over the Tuple and directly pick up the right input of the Tuple.
 *
 * For instance, if a function call is proven to have no side effect and to return the constant 0,
 * we can replace it with the 6-tuple:
 * (control input, IO input, memory input, frame ptr input, return addr input, Con:0)
 * All the output projections will pick up the input of the now-gone call, except for the result
 * projection, which is replaced by 0.
 *
 * Using TupleNode avoids manual graph surgery and leaves that to our expert surgeon: IGVN.
 * Since the users of a Tuple are expected to be Projs, when creating a tuple during idealization,
 * the output Projs should be enqueued for IGVN immediately after, and the tuple should not survive
 * after the current IGVN pass.
 */
259 class TupleNode : public MultiNode {
260   const TypeTuple* _tf;
261 
262   template <typename... NN>
263   static void make_helper(TupleNode* tn, uint i, Node* node, NN... nn) {
264     tn->set_req(i, node);
265     make_helper(tn, i + 1, nn...);
266   }
267 
268   static void make_helper(TupleNode*, uint) {}
269 
270 public:
271   TupleNode(const TypeTuple* tf) : MultiNode(tf->cnt()), _tf(tf) {}
272 
273   int Opcode() const override;
274   const Type* bottom_type() const override { return _tf; }
275 
276   /* Give as many `Node*` as you want in the `nn` pack:
277    * TupleNode::make(tf, input1)
278    * TupleNode::make(tf, input1, input2, input3, input4)
279    */
280   template <typename... NN>
281   static TupleNode* make(const TypeTuple* tf, NN... nn) {
282     TupleNode* tn = new TupleNode(tf);
283     make_helper(tn, 0, nn...);
284     return tn;
285   }
286 };
287 
288 #endif // SHARE_OPTO_MULTNODE_HPP