/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_PHASEX_HPP
#define SHARE_OPTO_PHASEX_HPP

#include "libadt/dict.hpp"
#include "libadt/vectset.hpp"
#include "memory/resourceArea.hpp"
#include "opto/memnode.hpp"
#include "opto/node.hpp"
#include "opto/phase.hpp"
#include "opto/type.hpp"
#include "utilities/globalDefinitions.hpp"

class BarrierSetC2;
class Compile;
class ConINode;
class ConLNode;
class Node;
class Type;
class PhaseTransform;
class   PhaseGVN;
class     PhaseIterGVN;
class       PhaseCCP;
class   PhasePeephole;
class   PhaseRegAlloc;


//-----------------------------------------------------------------------------
// Expandable closed hash-table of nodes, initialized to null.
// Note that the constructor just zeros things
// Storage is reclaimed when the Arena's lifetime is over.
class NodeHash : public AnyObj {
protected:
  Arena *_a;                    // Arena to allocate in
  uint   _max;                  // Size of table (power of 2)
  uint   _inserts;              // For grow and debug, count of hash_inserts
  uint   _insert_limit;         // 'grow' when _inserts reaches _insert_limit
  Node **_table;                // Hash table of Node pointers
  Node  *_sentinel;             // Replaces deleted entries in hash table

public:
  NodeHash(Arena *arena, uint est_max_size);
#ifdef ASSERT
  ~NodeHash();                  // Unlock all nodes upon destruction of table.
#endif
  Node  *hash_find(const Node*);// Find an equivalent version in hash table
  Node  *hash_find_insert(Node*);// If not in table insert else return found node
  void   hash_insert(Node*);    // Insert into hash table
  bool   hash_delete(const Node*);// Replace with _sentinel in hash table
  void   check_grow() {
    _inserts++;
    if( _inserts == _insert_limit ) { grow(); }
    assert( _inserts <= _insert_limit, "hash table overflow");
    assert( _inserts < _max, "hash table overflow" );
  }
  static uint round_up(uint);   // Round up to nearest power of 2
  void   grow();                // Grow _table to next power of 2 and rehash
  // Return 75% of _max, rounded up.
  uint   insert_limit() const { return _max - (_max>>2); }
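  // For example, a table with _max == 32 has an insert limit of
  // 32 - (32 >> 2) == 24, so check_grow() triggers grow() on the 24th insert.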

  void   clear();               // Set all entries to null, keep storage.
  // Size of hash table
  uint   size()         const { return _max; }
  // Return Node* at index in table
  Node  *at(uint table_index) {
    assert(table_index < _max, "Must be within table");
    return _table[table_index];
  }

  void   remove_useless_nodes(VectorSet& useful); // replace with sentinel
  void   check_no_speculative_types(); // Check no speculative part for type nodes in table

  Node  *sentinel() { return _sentinel; }

#ifndef PRODUCT
  Node  *find_index(uint idx);  // For debugging
  void   dump();                // For debugging, dump statistics
  uint   _grows;                // For debugging, count of table grow()s
  uint   _look_probes;          // For debugging, count of hash probes during lookups
  uint   _lookup_hits;          // For debugging, count of hash_find hits
  uint   _lookup_misses;        // For debugging, count of hash_find misses
  uint   _insert_probes;        // For debugging, count of hash probes during inserts
  uint   _delete_probes;        // For debugging, count of hash probes for deletes
  uint   _delete_hits;          // For debugging, count of hash probes for deletes
  uint   _delete_misses;        // For debugging, count of hash probes for deletes
  uint   _total_inserts;        // For debugging, total inserts into hash table
  uint   _total_insert_probes;  // For debugging, total probes while inserting
#endif
  NONCOPYABLE(NodeHash);
};
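
// Typical lookup/insert pattern (illustrative sketch only; 'arena' and 'n' are
// hypothetical stand-ins for an Arena and a candidate Node):
//   NodeHash table(arena, 32);
//   Node* existing = table.hash_find_insert(n); // nullptr if 'n' was newly inserted
//   if (existing != nullptr) {
//     // an equivalent node is already in the table; reuse it instead of 'n'
//   }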


//-----------------------------------------------------------------------------
// Map dense integer indices to Types.  Uses classic doubling-array trick.
// Abstractly provides an infinite array of Type*'s, initialized to null.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
// Despite the general name, this class is customized for use by PhaseValues.
class Type_Array : public AnyObj {
  Arena *_a;                    // Arena to allocate in
  uint   _max;
  const Type **_types;
  void grow( uint i );          // Grow array to fit element i
  const Type *operator[] ( uint i ) const // Lookup, or null for not mapped
  { return (i<_max) ? _types[i] : (Type*)nullptr; }
  friend class PhaseValues;
public:
  Type_Array(Arena *a) : _a(a), _max(0), _types(nullptr) {}
  const Type *fast_lookup(uint i) const{assert(i<_max,"oob");return _types[i];}
  // Extend the mapping: index i maps to Type *n.
  void map( uint i, const Type *n ) { if( i>=_max ) grow(i); _types[i] = n; }
  uint Size() const { return _max; }
#ifndef PRODUCT
  void dump() const;
#endif
  void swap(Type_Array &other) {
    if (this != &other) {
      assert(_a == other._a, "swapping for differing arenas is probably a bad idea");
      ::swap(_max, other._max);
      ::swap(_types, other._types);
    }
  }
  NONCOPYABLE(Type_Array);
};
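
// Illustrative sketch of the doubling-array behavior (hypothetical values;
// reads go through the private operator[], which PhaseValues uses as a friend):
//   Type_Array types(arena);        // empty, Size() == 0
//   types.map(100, TypeInt::INT);   // grows storage to cover index 100
//   types[100];                     // TypeInt::INT
//   types[500];                     // null: beyond _max, reads never grow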


//------------------------------PhaseRemoveUseless-----------------------------
// Remove useless nodes from GVN hash-table, worklist, and graph
class PhaseRemoveUseless : public Phase {
protected:
  Unique_Node_List _useful;   // Nodes reachable from root
                              // list is allocated from current resource area
public:
  PhaseRemoveUseless(PhaseGVN* gvn, Unique_Node_List& worklist, PhaseNumber phase_num = Remove_Useless);

  Unique_Node_List *get_useful() { return &_useful; }
};

//------------------------------PhaseRenumber----------------------------------
// Phase that first performs a PhaseRemoveUseless, then renumbers compiler
// structures accordingly.
class PhaseRenumberLive : public PhaseRemoveUseless {
protected:
  Type_Array _new_type_array; // Storage for the updated type information.
  GrowableArray<int> _old2new_map;
  Node_List _delayed;
  bool _is_pass_finished;
  uint _live_node_count;

  int update_embedded_ids(Node* n);
  int new_index(int old_idx);

public:
  PhaseRenumberLive(PhaseGVN* gvn,
                    Unique_Node_List& worklist,
                    PhaseNumber phase_num = Remove_Useless_And_Renumber_Live);
};


//------------------------------PhaseTransform---------------------------------
// Phases that analyze, then transform.  Constructing the Phase object does any
// global or slow analysis.  The results are cached for later use by a fast
// transformation pass.  When the Phase object is deleted the cached analysis
// results are deleted.
class PhaseTransform : public Phase {
public:
  PhaseTransform(PhaseNumber pnum) : Phase(pnum) {
#ifndef PRODUCT
    clear_progress();
    clear_transforms();
    set_allow_progress(true);
#endif
  }

  // Return a node which computes the same function as this node, but
  // in a faster or cheaper fashion.
  virtual Node *transform( Node *n ) = 0;

  // true if CFG node d dominates CFG node n
  virtual bool is_dominator(Node *d, Node *n) { fatal("unimplemented for this pass"); return false; }

#ifndef PRODUCT
  uint   _count_progress;       // For profiling, count transforms that make progress
  void   set_progress()        { ++_count_progress; assert( allow_progress(),"No progress allowed during verification"); }
  void   clear_progress()      { _count_progress = 0; }
  uint   made_progress() const { return _count_progress; }

  uint   _count_transforms;     // For profiling, count transforms performed
  void   set_transforms()      { ++_count_transforms; }
  void   clear_transforms()    { _count_transforms = 0; }
  uint   made_transforms() const{ return _count_transforms; }

  bool   _allow_progress;      // progress not allowed during verification pass
  void   set_allow_progress(bool allow) { _allow_progress = allow; }
  bool   allow_progress()               { return _allow_progress; }
#endif
};

// Phase infrastructure required for Node::Value computations.
// 1) Type array, and accessor methods.
// 2) Constants cache, which requires access to the types.
// 3) NodeHash table, to find identical nodes (and remove/update the hash of a node on modification).
class PhaseValues : public PhaseTransform {
protected:
  bool      _iterGVN;

  // Hash table for value-numbering. Reference to "C->node_hash()".
  NodeHash &_table;

  // Type array mapping node idx to Type*. Reference to "C->types()".
  Type_Array &_types;

  // ConNode caches:
  // Support both int and long caches because either might be an intptr_t,
  // so they show up frequently in address computations.
  enum { _icon_min = -1 * HeapWordSize,
         _icon_max = 16 * HeapWordSize,
         _lcon_min = _icon_min,
         _lcon_max = _icon_max,
         _zcon_max = (uint)T_CONFLICT
  };
  ConINode* _icons[_icon_max - _icon_min + 1];   // cached jint constant nodes
  ConLNode* _lcons[_lcon_max - _lcon_min + 1];   // cached jlong constant nodes
  ConNode*  _zcons[_zcon_max + 1];               // cached is_zero_type nodes
  void init_con_caches();
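
  // Sizing example: with HeapWordSize == 8 (a 64-bit VM), the caches cover
  // jint/jlong constants in [-8, 128], i.e. 137 entries each, which captures
  // the small offsets that dominate address arithmetic.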

public:
  PhaseValues() : PhaseTransform(GVN), _iterGVN(false),
                  _table(*C->node_hash()), _types(*C->types())
  {
    NOT_PRODUCT( clear_new_values(); )
    // Force allocation for currently existing nodes
    _types.map(C->unique(), nullptr);
    init_con_caches();
  }
  NOT_PRODUCT(~PhaseValues();)
  PhaseIterGVN* is_IterGVN() { return (_iterGVN) ? (PhaseIterGVN*)this : nullptr; }

  // Some Ideal and other transforms delete --> modify --> insert values
  bool   hash_delete(Node* n)     { return _table.hash_delete(n); }
  void   hash_insert(Node* n)     { _table.hash_insert(n); }
  Node*  hash_find_insert(Node* n){ return _table.hash_find_insert(n); }
  Node*  hash_find(const Node* n) { return _table.hash_find(n); }

  // Used after parsing to eliminate values that are no longer in the program
  void   remove_useless_nodes(VectorSet &useful) {
    _table.remove_useless_nodes(useful);
    // This may invalidate cached constants, so reset the cache
    init_con_caches();
  }

  Type_Array& types() {
    return _types;
  }

  // Get a previously recorded type for the node n.
  // This type must already have been recorded.
  // If you want the type of a very new (untransformed) node,
  // you must use type_or_null, and test the result for null.
  const Type* type(const Node* n) const {
    assert(n != nullptr, "must not be null");
    const Type* t = _types.fast_lookup(n->_idx);
    assert(t != nullptr, "must set before get");
    return t;
  }
  // Get a previously recorded type for the node n,
  // or else return null if there is none.
  const Type* type_or_null(const Node* n) const {
    return _types.fast_lookup(n->_idx);
  }
  // Record a type for a node.
  void    set_type(const Node* n, const Type *t) {
    assert(t != nullptr, "type must not be null");
    _types.map(n->_idx, t);
  }
  void    clear_type(const Node* n) {
    if (n->_idx < _types.Size()) {
      _types.map(n->_idx, nullptr);
    }
  }
  // Record an initial type for a node, the node's bottom type.
  void    set_type_bottom(const Node* n) {
    // Use this for initialization when bottom_type() (or better) is not handy.
    // Usually the initialization should be to n->Value(this) instead,
    // or a hand-optimized value like Type::MEMORY or Type::CONTROL.
    assert(_types[n->_idx] == nullptr, "must set the initial type just once");
    _types.map(n->_idx, n->bottom_type());
  }
  // Make sure the types array is big enough to record a size for the node n.
  // (In product builds, we never want to do range checks on the types array!)
  void ensure_type_or_null(const Node* n) {
    if (n->_idx >= _types.Size())
      _types.map(n->_idx, nullptr);   // Grow the types array as needed.
  }
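
  // Contract sketch (hypothetical nodes 'n' and 'm'):
  //   set_type_bottom(n);                  // or set_type(n, n->Value(this))
  //   const Type* t = type(n);             // safe: a type has been recorded
  //   const Type* u = type_or_null(m);     // may be null for a brand-new node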

  // Utility functions:
  const TypeInt*  find_int_type( Node* n);
  const TypeLong* find_long_type(Node* n);
  jint  find_int_con( Node* n, jint  value_if_unknown) {
    const TypeInt* t = find_int_type(n);
    return (t != nullptr && t->is_con()) ? t->get_con() : value_if_unknown;
  }
  jlong find_long_con(Node* n, jlong value_if_unknown) {
    const TypeLong* t = find_long_type(n);
    return (t != nullptr && t->is_con()) ? t->get_con() : value_if_unknown;
  }
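
  // For example (sketch): if 'n' is known to be the int constant 42,
  // find_int_con(n, -1) returns 42; if the recorded type is not a single
  // constant, the fallback value -1 is returned instead.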

  // Make an idealized constant, i.e., one of ConINode, ConPNode, ConFNode, etc.
  // Same as transform(ConNode::make(t)).
  ConNode* makecon(const Type* t);
  ConNode* uncached_makecon(const Type* t);

  // Fast int or long constant.  Same as makecon(TypeInt::make(i))
  // or makecon(TypeLong::make(l)).
  ConINode* intcon(jint i);
  ConLNode* longcon(jlong l);
  ConNode* integercon(jlong l, BasicType bt);

  // Fast zero or null constant.  Same as makecon(Type::get_zero_type(bt)).
  ConNode* zerocon(BasicType bt);
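
  // Usage sketch (hypothetical): constants come back already value-numbered,
  // so repeated requests for the same value share one node:
  //   ConINode* four = intcon(4);
  //   ConNode*  nil  = zerocon(T_OBJECT);   // the null pointer constant
  //   assert(intcon(4) == four, "cached constant node is reused");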

  // For pessimistic passes, the return type must monotonically narrow.
  // For optimistic  passes, the return type must monotonically widen.
  // It is possible to get into a "death march" in either type of pass,
  // where the types are continually moving but it will take 2**31 or
  // more steps to converge.  This doesn't happen on most normal loops.
  //
  // Here is an example of a deadly loop for an optimistic pass, along
  // with a partial trace of inferred types:
  //    x = phi(0,x'); L: x' = x+1; if (x' >= 0) goto L;
  //    0                 1                join([0..max], 1)
  //    [0..1]            [1..2]           join([0..max], [1..2])
  //    [0..2]            [1..3]           join([0..max], [1..3])
  //      ... ... ...
  //    [0..max]          [min]u[1..max]   join([0..max], [min..max])
  //    [0..max] ==> fixpoint
  // We would have proven, the hard way, that the iteration space is all
  // non-negative ints, with the loop terminating due to 32-bit overflow.
  //
  // Here is the corresponding example for a pessimistic pass:
  //    x = phi(0,x'); L: x' = x-1; if (x' >= 0) goto L;
  //    int               int              join([0..max], int)
  //    [0..max]          [-1..max-1]      join([0..max], [-1..max-1])
  //    [0..max-1]        [-1..max-2]      join([0..max], [-1..max-2])
  //      ... ... ...
  //    [0..1]            [-1..0]          join([0..max], [-1..0])
  //    0                 -1               join([0..max], -1)
  //    0 == fixpoint
  // We would have proven, the hard way, that the iteration space is {0}.
  // (Usually, other optimizations will make the "if (x >= 0)" fold up
  // before we get into trouble.  But not always.)
  //
  // It's a pleasant thing to observe that the pessimistic pass
  // will make short work of the optimistic pass's deadly loop,
  // and vice versa.  That is a good example of the complementary
  // purposes of the CCP (optimistic) vs. GVN (pessimistic) phases.
  //
  // In any case, only widen or narrow a few times before going to the
  // correct flavor of top or bottom.
  //
  // This call only needs to be made once as the data flows around any
  // given cycle.  We do it at Phis, and nowhere else.
  // The types presented are the new type of a phi (computed by PhiNode::Value)
  // and the previously computed type, last time the phi was visited.
  //
  // The third argument is the upper limit for the saturated value,
  // if the phase wishes to widen the new_type.
  // If the phase is narrowing, the old type provides a lower limit.
  // Caller guarantees that old_type and new_type are no higher than limit_type.
  virtual const Type* saturate(const Type* new_type,
                               const Type* old_type,
                               const Type* limit_type) const {
    return new_type;
  }
  virtual const Type* saturate_and_maybe_push_to_igvn_worklist(const TypeNode* n, const Type* new_type) {
    return saturate(new_type, type_or_null(n), n->type());
  }
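
  // Illustrative behavior (hypothetical widening override, in the spirit of
  // PhaseCCP::saturate below): with old_type = [0..2], new_type = [0..3] and
  // limit_type = int, a widening saturate may return int (or a much wider
  // range) after a few visits, rather than inching up one step per visit.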

#ifndef PRODUCT
  uint   _count_new_values;     // For profiling, count new values produced
  void    inc_new_values()        { ++_count_new_values; }
  void    clear_new_values()      { _count_new_values = 0; }
  uint    made_new_values() const { return _count_new_values; }
#endif
};


//------------------------------PhaseGVN---------------------------------------
// Phase for performing local, pessimistic GVN-style optimizations.
class PhaseGVN : public PhaseValues {
protected:
  bool is_dominator_helper(Node *d, Node *n, bool linear_only);

public:
  // Return a node which computes the same function as this node, but
  // in a faster or cheaper fashion.
  Node  *transform( Node *n );
  Node  *transform_no_reclaim( Node *n );
  virtual void record_for_igvn(Node *n) {
    C->record_for_igvn(n);
  }

  bool is_dominator(Node *d, Node *n) { return is_dominator_helper(d, n, true); }

  // Helper to call Node::Ideal() and BarrierSetC2::ideal_node().
  Node* apply_ideal(Node* i, bool can_reshape);

#ifdef ASSERT
  void dump_infinite_loop_info(Node* n, const char* where);
  // Check for a simple dead loop when a data node references itself.
  void dead_loop_check(Node *n);
#endif
};
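
// Typical use during parsing (illustrative sketch; 'a' and 'b' are hypothetical
// already-transformed inputs): build a node, then let GVN fold or share it:
//   Node* sum = gvn.transform(new AddINode(a, b));
// 'sum' may be the new node, a constant, or a pre-existing equivalent node.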

//------------------------------PhaseIterGVN-----------------------------------
// Phase for iteratively performing local, pessimistic GVN-style optimizations
// and ideal transformations on the graph.
class PhaseIterGVN : public PhaseGVN {
private:
  bool _delay_transform;  // When true, transform() simply registers the node
                          // instead of actually optimizing it

  // Idealize old Node 'n' with respect to its inputs and its value
  virtual Node *transform_old( Node *a_node );

  // Subsume users of node 'old' into node 'nn'
  void subsume_node( Node *old, Node *nn );

protected:
  // Shuffle worklist, for stress testing
  void shuffle_worklist();

  virtual const Type* saturate(const Type* new_type, const Type* old_type,
                               const Type* limit_type) const;
  // Usually returns new_type.  Returns old_type if new_type is only a slight
  // improvement, such that it would take many (>>10) steps to reach 2**32.

public:

  PhaseIterGVN(PhaseIterGVN* igvn); // Used by CCP constructor
  PhaseIterGVN(PhaseGVN* gvn); // Used after Parser

  // Reset IGVN from GVN: call the destructor, then use placement new.
  // Achieves the same as the following (but without move constructors):
  // igvn = PhaseIterGVN(gvn);
  void reset_from_gvn(PhaseGVN* gvn) {
    if (this != gvn) {
      this->~PhaseIterGVN();
      ::new (static_cast<void*>(this)) PhaseIterGVN(gvn);
    }
  }

  // Reset IGVN with another: call the destructor, then use placement new.
  // Achieves the same as the following (but without move constructors):
  // igvn = PhaseIterGVN(other);
  void reset_from_igvn(PhaseIterGVN* other) {
    if (this != other) {
      this->~PhaseIterGVN();
      ::new (static_cast<void*>(this)) PhaseIterGVN(other);
    }
  }

  // Idealize new Node 'n' with respect to its inputs and its value
  virtual Node *transform( Node *a_node );
  virtual void record_for_igvn(Node *n) { }

  // Iterative worklist. Reference to "C->igvn_worklist()".
  Unique_Node_List &_worklist;

  // Given def-use info and an initial worklist, apply Node::Ideal,
  // Node::Value, Node::Identity, hash-based value numbering, Node::Ideal_DU
  // and dominator info to a fixed point.
  void optimize();
#ifdef ASSERT
  void verify_optimize();
  bool verify_node_value(Node* n);
#endif

#ifndef PRODUCT
  void trace_PhaseIterGVN(Node* n, Node* nn, const Type* old_type);
  void init_verifyPhaseIterGVN();
  void verify_PhaseIterGVN();
#endif

#ifdef ASSERT
  void dump_infinite_loop_info(Node* n, const char* where);
  void trace_PhaseIterGVN_verbose(Node* n, int num_processed);
#endif

  // Register a new node with the iter GVN pass without transforming it.
  // Used when we need to restructure a Region/Phi area and all the Regions
  // and Phis need to complete this one big transform before any other
  // transforms can be triggered on the region.
  // Optional 'orig' is an earlier version of this node.
  // It is significant only for debugging and profiling.
  Node* register_new_node_with_optimizer(Node* n, Node* orig = nullptr);

  // Kill a globally dead Node.  All uses are also globally dead and are
  // aggressively trimmed.
  void remove_globally_dead_node( Node *dead );

  // Kill all inputs to a dead node, recursively making more dead nodes.
  // The Node must be dead locally, i.e., have no uses.
  void remove_dead_node( Node *dead ) {
    assert(dead->outcnt() == 0 && !dead->is_top(), "node must be dead");
    remove_globally_dead_node(dead);
  }

  // Add users of 'n' to worklist
  void add_users_to_worklist0( Node *n );
  void add_users_to_worklist ( Node *n );

  // Replace old node with new one.
  void replace_node( Node *old, Node *nn ) {
    add_users_to_worklist(old);
    hash_delete(old); // Yank from hash before hacking edges
    subsume_node(old, nn);
  }

  // Delayed node rehash: remove a node from the hash table and rehash it during
  // the next optimizing pass
  void rehash_node_delayed(Node* n) {
    hash_delete(n);
    _worklist.push(n);
  }

  // Replace ith edge of "n" with "in"
  void replace_input_of(Node* n, int i, Node* in) {
    rehash_node_delayed(n);
    n->set_req_X(i, in, this);
  }

  // Add "in" as input (req) of "n"
  void add_input_to(Node* n, Node* in) {
    rehash_node_delayed(n);
    n->add_req(in);
  }

  // Delete ith edge of "n"
  void delete_input_of(Node* n, int i) {
    rehash_node_delayed(n);
    n->del_req(i);
  }

  // Delete precedence edge i of "n"
  void delete_precedence_of(Node* n, int i) {
    rehash_node_delayed(n);
    n->rm_prec(i);
  }
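
  // Common editing pattern inside an Ideal() transformation (sketch only;
  // 'igvn' is the PhaseIterGVN driving the pass, 'n' a node being reshaped):
  //   igvn->replace_input_of(n, 1, new_in); // rehashes 'n' and re-enqueues it
  //   igvn->replace_node(old, nn);          // all users of 'old' now use 'nn'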

  bool delay_transform() const { return _delay_transform; }

  void set_delay_transform(bool delay) {
    _delay_transform = delay;
  }

  void remove_speculative_types();
  void check_no_speculative_types() {
    _table.check_no_speculative_types();
  }

  bool is_dominator(Node *d, Node *n) { return is_dominator_helper(d, n, false); }
  bool no_dependent_zero_check(Node* n) const;

#ifndef PRODUCT
  static bool is_verify_def_use() {
    // '-XX:VerifyIterativeGVN=1'
    return (VerifyIterativeGVN % 10) == 1;
  }
  static bool is_verify_Value() {
    // '-XX:VerifyIterativeGVN=10'
    return ((VerifyIterativeGVN % 100) / 10) == 1;
  }
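
  // For example, '-XX:VerifyIterativeGVN=11' sets both the ones and the tens
  // digit, so is_verify_def_use() and is_verify_Value() both return true.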
protected:
  // Sub-quadratic implementation of '-XX:VerifyIterativeGVN=1' (Use-Def verification).
  julong _verify_counter;
  julong _verify_full_passes;
  enum { _verify_window_size = 30 };
  Node* _verify_window[_verify_window_size];
  void verify_step(Node* n);
#endif
};

//------------------------------PhaseCCP---------------------------------------
// Phase for performing global Conditional Constant Propagation.
// Should be replaced with combined CCP & GVN someday.
class PhaseCCP : public PhaseIterGVN {
  Unique_Node_List _root_and_safepoints;
  // Non-recursive.  Use analysis to transform a single Node.
  virtual Node* transform_once(Node* n);

  Node* fetch_next_node(Unique_Node_List& worklist);
  static void dump_type_and_node(const Node* n, const Type* t) PRODUCT_RETURN;

  void push_child_nodes_to_worklist(Unique_Node_List& worklist, Node* n) const;
  void push_if_not_bottom_type(Unique_Node_List& worklist, Node* n) const;
  void push_more_uses(Unique_Node_List& worklist, Node* parent, const Node* use) const;
  void push_phis(Unique_Node_List& worklist, const Node* use) const;
  static void push_catch(Unique_Node_List& worklist, const Node* use);
  void push_cmpu(Unique_Node_List& worklist, const Node* use) const;
  static void push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use);
  void push_loadp(Unique_Node_List& worklist, const Node* use) const;
  static void push_load_barrier(Unique_Node_List& worklist, const BarrierSetC2* barrier_set, const Node* use);
  void push_and(Unique_Node_List& worklist, const Node* parent, const Node* use) const;
  void push_cast_ii(Unique_Node_List& worklist, const Node* parent, const Node* use) const;
  void push_opaque_zero_trip_guard(Unique_Node_List& worklist, const Node* use) const;

public:
  PhaseCCP( PhaseIterGVN *igvn ); // Compute conditional constants
  NOT_PRODUCT( ~PhaseCCP(); )

  // Worklist algorithm identifies constants
  void analyze();
#ifdef ASSERT
  void verify_type(Node* n, const Type* tnew, const Type* told);
  // For every node n on the verify list, check if type(n) == n->Value()
  void verify_analyze(Unique_Node_List& worklist_verify);
#endif
  // Recursive traversal of program.  Uses analysis to modify the program.
  virtual Node *transform( Node *n );
  // Do any transformation after analysis
  void          do_transform();

  virtual const Type* saturate(const Type* new_type, const Type* old_type,
                               const Type* limit_type) const;
  // Returns new_type->widen(old_type), which increments the widen bits until
  // giving up with TypeInt::INT or TypeLong::LONG.
  // Result is clipped to limit_type if necessary.
  virtual const Type* saturate_and_maybe_push_to_igvn_worklist(const TypeNode* n, const Type* new_type) {
    const Type* t = saturate(new_type, type_or_null(n), n->type());
    if (t != new_type) {
      // Type was widened in CCP, but IGVN may be able to make it narrower.
      _worklist.push((Node*)n);
    }
    return t;
  }

#ifndef PRODUCT
  static uint _total_invokes;    // For profiling, count invocations
  void    inc_invokes()          { ++PhaseCCP::_total_invokes; }

  static uint _total_constants;  // For profiling, count constants found
  uint   _count_constants;
  void    clear_constants()      { _count_constants = 0; }
  void    inc_constants()        { ++_count_constants; }
  uint    count_constants() const { return _count_constants; }

  static void print_statistics();
#endif
};


//------------------------------PhasePeephole----------------------------------
// Phase for performing peephole optimizations on register-allocated basic blocks.
class PhasePeephole : public PhaseTransform {
  PhaseRegAlloc *_regalloc;
  PhaseCFG     &_cfg;
  // Recursive traversal of program.  Pure function is unused in this phase.
  virtual Node *transform( Node *n );

public:
  PhasePeephole( PhaseRegAlloc *regalloc, PhaseCFG &cfg );
  NOT_PRODUCT( ~PhasePeephole(); )

  // Do any transformation after analysis
  void          do_transform();

#ifndef PRODUCT
  static uint _total_peepholes;  // For profiling, count peephole rules applied
  uint   _count_peepholes;
  void    clear_peepholes()      { _count_peepholes = 0; }
  void    inc_peepholes()        { ++_count_peepholes; }
  uint    count_peepholes() const { return _count_peepholes; }

  static void print_statistics();
#endif
};

#endif // SHARE_OPTO_PHASEX_HPP