1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_OPTO_PHASEX_HPP
26 #define SHARE_OPTO_PHASEX_HPP
27
28 #include "libadt/dict.hpp"
29 #include "libadt/vectset.hpp"
30 #include "memory/resourceArea.hpp"
31 #include "opto/memnode.hpp"
32 #include "opto/node.hpp"
33 #include "opto/phase.hpp"
34 #include "opto/type.hpp"
35 #include "utilities/globalDefinitions.hpp"
36
37 class BarrierSetC2;
38 class Compile;
39 class ConINode;
40 class ConLNode;
41 class Node;
42 class Type;
43 class PhaseTransform;
44 class PhaseGVN;
45 class PhaseIterGVN;
46 class PhaseCCP;
47 class PhasePeephole;
48 class PhaseRegAlloc;
49
50
51 //-----------------------------------------------------------------------------
52 // Expandable closed hash-table of nodes, initialized to null.
// Note that the constructor just zeros things.
54 // Storage is reclaimed when the Arena's lifetime is over.
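//
// Typical value-numbering use (illustrative sketch only; in practice PhaseValues/PhaseGVN
// drive the table):
//   Node* existing = table.hash_find_insert(n); // equivalent node if present, else insert n
//   if (existing != nullptr && existing != n) {
//     // 'n' is redundant; the caller substitutes 'existing' for it
//   }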
55 class NodeHash : public AnyObj {
56 protected:
57 Arena *_a; // Arena to allocate in
58 uint _max; // Size of table (power of 2)
59 uint _inserts; // For grow and debug, count of hash_inserts
60 uint _insert_limit; // 'grow' when _inserts reaches _insert_limit
61 Node **_table; // Hash table of Node pointers
62 Node *_sentinel; // Replaces deleted entries in hash table
63
64 public:
65 NodeHash(Arena *arena, uint est_max_size);
66 #ifdef ASSERT
67 ~NodeHash(); // Unlock all nodes upon destruction of table.
68 #endif
69 Node *hash_find(const Node*);// Find an equivalent version in hash table
70 Node *hash_find_insert(Node*);// If not in table insert else return found node
71 void hash_insert(Node*); // Insert into hash table
72 bool hash_delete(const Node*);// Replace with _sentinel in hash table
73 void check_grow() {
74 _inserts++;
75 if( _inserts == _insert_limit ) { grow(); }
76 assert( _inserts <= _insert_limit, "hash table overflow");
77 assert( _inserts < _max, "hash table overflow" );
78 }
79 static uint round_up(uint); // Round up to nearest power of 2
80 void grow(); // Grow _table to next power of 2 and rehash
81 // Return 75% of _max, rounded up.
82 uint insert_limit() const { return _max - (_max>>2); }
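  // e.g., for _max == 64 this returns 64 - (64 >> 2) == 48.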
83
84 void clear(); // Set all entries to null, keep storage.
85 // Size of hash table
86 uint size() const { return _max; }
87 // Return Node* at index in table
88 Node *at(uint table_index) {
89 assert(table_index < _max, "Must be within table");
90 return _table[table_index];
91 }
92
93 void remove_useless_nodes(VectorSet& useful); // replace with sentinel
94 void check_no_speculative_types(); // Check no speculative part for type nodes in table
95
96 Node *sentinel() { return _sentinel; }
97
98 #ifndef PRODUCT
99 Node *find_index(uint idx); // For debugging
100 void dump(); // For debugging, dump statistics
101 uint _grows; // For debugging, count of table grow()s
  uint _look_probes;            // For debugging, count of hash probes during lookups
  uint _lookup_hits;            // For debugging, count of successful hash_finds
  uint _lookup_misses;          // For debugging, count of failed hash_finds
  uint _insert_probes;          // For debugging, count of hash probes during inserts
  uint _delete_probes;          // For debugging, count of hash probes during deletes
  uint _delete_hits;            // For debugging, count of successful hash_deletes
  uint _delete_misses;          // For debugging, count of failed hash_deletes
109 uint _total_inserts; // For debugging, total inserts into hash table
110 uint _total_insert_probes; // For debugging, total probes while inserting
111 #endif
112 NONCOPYABLE(NodeHash);
113 };
114
115
116 //-----------------------------------------------------------------------------
117 // Map dense integer indices to Types. Uses classic doubling-array trick.
118 // Abstractly provides an infinite array of Type*'s, initialized to null.
119 // Note that the constructor just zeros things, and since I use Arena
120 // allocation I do not need a destructor to reclaim storage.
121 // Despite the general name, this class is customized for use by PhaseValues.
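//
// Illustrative use (the 'arena' variable is hypothetical; the array behaves like an
// unbounded Type* array initialized to null):
//   Type_Array types(arena);
//   types.map(17, TypeInt::INT);   // grows storage as needed, then records the type
//   const Type* a = types[17];     // TypeInt::INT
//   const Type* b = types[1000];   // null: index never mapped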
122 class Type_Array : public AnyObj {
123 Arena *_a; // Arena to allocate in
124 uint _max;
125 const Type **_types;
  void grow( uint i );          // Grow array to fit at least index i
127 public:
128 Type_Array(Arena *a) : _a(a), _max(0), _types(nullptr) {}
129 const Type *operator[] ( uint i ) const // Lookup, or null for not mapped
130 { return (i<_max) ? _types[i] : (Type*)nullptr; }
  const Type *fast_lookup(uint i) const { assert(i < _max, "oob"); return _types[i]; }
132 // Extend the mapping: index i maps to Type *n.
133 void map( uint i, const Type *n ) { if( i>=_max ) grow(i); _types[i] = n; }
134 uint Size() const { return _max; }
135 #ifndef PRODUCT
136 void dump() const;
137 #endif
138 void swap(Type_Array &other) {
139 if (this != &other) {
140 assert(_a == other._a, "swapping for differing arenas is probably a bad idea");
141 ::swap(_max, other._max);
142 ::swap(_types, other._types);
143 }
144 }
145 NONCOPYABLE(Type_Array);
146 };
147
148
149 //------------------------------PhaseRemoveUseless-----------------------------
150 // Remove useless nodes from GVN hash-table, worklist, and graph
151 class PhaseRemoveUseless : public Phase {
152 protected:
153 Unique_Node_List _useful; // Nodes reachable from root
154 // list is allocated from current resource area
155 public:
156 PhaseRemoveUseless(PhaseGVN* gvn, Unique_Node_List& worklist, PhaseNumber phase_num = Remove_Useless);
157
158 Unique_Node_List *get_useful() { return &_useful; }
159 };
160
//------------------------------PhaseRenumberLive------------------------------
// Phase that first performs a PhaseRemoveUseless, then renumbers the remaining
// live nodes and updates compiler structures accordingly.
164 class PhaseRenumberLive : public PhaseRemoveUseless {
165 protected:
166 Type_Array _new_type_array; // Storage for the updated type information.
167 GrowableArray<int> _old2new_map;
168 Node_List _delayed;
169 bool _is_pass_finished;
170 uint _live_node_count;
171
172 int update_embedded_ids(Node* n);
173 int new_index(int old_idx);
174
175 public:
176 PhaseRenumberLive(PhaseGVN* gvn,
177 Unique_Node_List& worklist,
178 PhaseNumber phase_num = Remove_Useless_And_Renumber_Live);
179 };
180
181
182 //------------------------------PhaseTransform---------------------------------
183 // Phases that analyze, then transform. Constructing the Phase object does any
// global or slow analysis. The results are cached for later use by a fast
// transformation pass. When the Phase object is deleted, the cached analysis
186 // results are deleted.
187 class PhaseTransform : public Phase {
188 public:
189 PhaseTransform(PhaseNumber pnum) : Phase(pnum) {
190 #ifndef PRODUCT
191 clear_progress();
192 clear_transforms();
193 set_allow_progress(true);
194 #endif
195 }
196
197 // Return a node which computes the same function as this node, but
198 // in a faster or cheaper fashion.
199 virtual Node *transform( Node *n ) = 0;
200
201 // true if CFG node d dominates CFG node n
  virtual bool is_dominator(Node *d, Node *n) { fatal("unimplemented for this pass"); return false; }
203
204 #ifndef PRODUCT
205 uint _count_progress; // For profiling, count transforms that make progress
206 void set_progress() { ++_count_progress; assert( allow_progress(),"No progress allowed during verification"); }
207 void clear_progress() { _count_progress = 0; }
208 uint made_progress() const { return _count_progress; }
209
210 uint _count_transforms; // For profiling, count transforms performed
211 void set_transforms() { ++_count_transforms; }
212 void clear_transforms() { _count_transforms = 0; }
213 uint made_transforms() const{ return _count_transforms; }
214
215 bool _allow_progress; // progress not allowed during verification pass
216 void set_allow_progress(bool allow) { _allow_progress = allow; }
217 bool allow_progress() { return _allow_progress; }
218 #endif
219 };
220
221 // Phase infrastructure required for Node::Value computations.
222 // 1) Type array, and accessor methods.
223 // 2) Constants cache, which requires access to the types.
224 // 3) NodeHash table, to find identical nodes (and remove/update the hash of a node on modification).
225 class PhaseValues : public PhaseTransform {
226 protected:
227 bool _iterGVN;
228
  // Hash table for value-numbering. Reference to "C->node_hash()".
230 NodeHash &_table;
231
232 // Type array mapping node idx to Type*. Reference to "C->types()".
233 Type_Array &_types;
234
235 // ConNode caches:
236 // Support both int and long caches because either might be an intptr_t,
237 // so they show up frequently in address computations.
238 enum { _icon_min = -1 * HeapWordSize,
239 _icon_max = 16 * HeapWordSize,
240 _lcon_min = _icon_min,
241 _lcon_max = _icon_max,
242 _zcon_max = (uint)T_CONFLICT
243 };
244 ConINode* _icons[_icon_max - _icon_min + 1]; // cached jint constant nodes
245 ConLNode* _lcons[_lcon_max - _lcon_min + 1]; // cached jlong constant nodes
246 ConNode* _zcons[_zcon_max + 1]; // cached is_zero_type nodes
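  // On a typical 64-bit VM (HeapWordSize == 8) this caches the jint/jlong constants
  // in [-8, 128], i.e. 137 nodes per cache.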
247 void init_con_caches();
248
249 public:
250 PhaseValues() : PhaseTransform(GVN), _iterGVN(false),
251 _table(*C->node_hash()), _types(*C->types())
252 {
253 NOT_PRODUCT( clear_new_values(); )
254 // Force allocation for currently existing nodes
255 _types.map(C->unique(), nullptr);
256 init_con_caches();
257 }
258 NOT_PRODUCT(~PhaseValues();)
259 PhaseIterGVN* is_IterGVN() { return (_iterGVN) ? (PhaseIterGVN*)this : nullptr; }
260
261 // Some Ideal and other transforms delete --> modify --> insert values
262 bool hash_delete(Node* n) { return _table.hash_delete(n); }
263 void hash_insert(Node* n) { _table.hash_insert(n); }
264 Node* hash_find_insert(Node* n){ return _table.hash_find_insert(n); }
265 Node* hash_find(const Node* n) { return _table.hash_find(n); }
266
  // Used after parsing to eliminate values that are no longer in the program
268 void remove_useless_nodes(VectorSet &useful) {
269 _table.remove_useless_nodes(useful);
    // This may invalidate cached constants, so reset the cache
271 init_con_caches();
272 }
273
274 Type_Array& types() {
275 return _types;
276 }
277
278 // Get a previously recorded type for the node n.
279 // This type must already have been recorded.
280 // If you want the type of a very new (untransformed) node,
281 // you must use type_or_null, and test the result for null.
282 const Type* type(const Node* n) const {
283 assert(n != nullptr, "must not be null");
284 const Type* t = _types.fast_lookup(n->_idx);
285 assert(t != nullptr, "must set before get");
286 return t;
287 }
288 // Get a previously recorded type for the node n,
289 // or else return null if there is none.
290 const Type* type_or_null(const Node* n) const {
291 return _types.fast_lookup(n->_idx);
292 }
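  // e.g. (illustrative), a guarded lookup for a node that may not have been transformed yet:
  //   const Type* t = type_or_null(n);
  //   if (t != nullptr && t->singleton()) { /* n is already known to be a constant */ }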
293 // Record a type for a node.
294 void set_type(const Node* n, const Type *t) {
295 assert(t != nullptr, "type must not be null");
296 _types.map(n->_idx, t);
297 }
298 void clear_type(const Node* n) {
299 if (n->_idx < _types.Size()) {
300 _types.map(n->_idx, nullptr);
301 }
302 }
303 // Record an initial type for a node, the node's bottom type.
304 void set_type_bottom(const Node* n) {
305 // Use this for initialization when bottom_type() (or better) is not handy.
306 // Usually the initialization should be to n->Value(this) instead,
307 // or a hand-optimized value like Type::MEMORY or Type::CONTROL.
308 assert(_types[n->_idx] == nullptr, "must set the initial type just once");
309 _types.map(n->_idx, n->bottom_type());
310 }
  // Make sure the types array is big enough to record a type for the node n.
312 // (In product builds, we never want to do range checks on the types array!)
313 void ensure_type_or_null(const Node* n) {
314 if (n->_idx >= _types.Size())
315 _types.map(n->_idx, nullptr); // Grow the types array as needed.
316 }
317
318 // Utility functions:
319 const TypeInt* find_int_type( Node* n);
320 const TypeLong* find_long_type(Node* n);
321 jint find_int_con( Node* n, jint value_if_unknown) {
322 const TypeInt* t = find_int_type(n);
323 return (t != nullptr && t->is_con()) ? t->get_con() : value_if_unknown;
324 }
325 jlong find_long_con(Node* n, jlong value_if_unknown) {
326 const TypeLong* t = find_long_type(n);
327 return (t != nullptr && t->is_con()) ? t->get_con() : value_if_unknown;
328 }
329
330 // Make an idealized constant, i.e., one of ConINode, ConPNode, ConFNode, etc.
331 // Same as transform(ConNode::make(t)).
332 ConNode* makecon(const Type* t);
333 ConNode* uncached_makecon(const Type* t);
334
  // Fast int or long constant. Same as makecon(TypeInt::make(i)) or makecon(TypeLong::make(l)).
336 ConINode* intcon(jint i);
337 ConLNode* longcon(jlong l);
338 ConNode* integercon(jlong l, BasicType bt);
339
340 // Fast zero or null constant. Same as makecon(Type::get_zero_type(bt)).
341 ConNode* zerocon(BasicType bt);
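  // e.g. (illustrative sketch, assuming a GVN-style phase and an existing int node 'x'):
  //   Node* one = intcon(1);                        // cached ConINode for the constant 1
  //   Node* add = transform(new AddINode(x, one));  // value-numbered addition x + 1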
342
343 // For pessimistic passes, the return type must monotonically narrow.
344 // For optimistic passes, the return type must monotonically widen.
345 // It is possible to get into a "death march" in either type of pass,
346 // where the types are continually moving but it will take 2**31 or
347 // more steps to converge. This doesn't happen on most normal loops.
348 //
349 // Here is an example of a deadly loop for an optimistic pass, along
350 // with a partial trace of inferred types:
  //   x = phi(0,x'); L: x' = x+1; if (x' >= 0) goto L;
  //    0                1                 join([0..max], 1)
  //    [0..1]           [1..2]            join([0..max], [1..2])
  //    [0..2]           [1..3]            join([0..max], [1..3])
  //    ...              ...               ...
  //    [0..max]         [min]u[1..max]    join([0..max], [min..max])
  //    [0..max]     ==> fixpoint
358 // We would have proven, the hard way, that the iteration space is all
359 // non-negative ints, with the loop terminating due to 32-bit overflow.
360 //
361 // Here is the corresponding example for a pessimistic pass:
  //   x = phi(0,x'); L: x' = x-1; if (x' >= 0) goto L;
  //    int              int               join([0..max], int)
  //    [0..max]         [-1..max-1]       join([0..max], [-1..max-1])
  //    [0..max-1]       [-1..max-2]       join([0..max], [-1..max-2])
  //    ...              ...               ...
  //    [0..1]           [-1..0]           join([0..max], [-1..0])
  //    0                -1                join([0..max], -1)
  //    0            ==  fixpoint
370 // We would have proven, the hard way, that the iteration space is {0}.
371 // (Usually, other optimizations will make the "if (x >= 0)" fold up
372 // before we get into trouble. But not always.)
373 //
374 // It's a pleasant thing to observe that the pessimistic pass
375 // will make short work of the optimistic pass's deadly loop,
376 // and vice versa. That is a good example of the complementary
377 // purposes of the CCP (optimistic) vs. GVN (pessimistic) phases.
378 //
379 // In any case, only widen or narrow a few times before going to the
380 // correct flavor of top or bottom.
381 //
382 // This call only needs to be made once as the data flows around any
383 // given cycle. We do it at Phis, and nowhere else.
384 // The types presented are the new type of a phi (computed by PhiNode::Value)
385 // and the previously computed type, last time the phi was visited.
386 //
  // The third argument is the upper limit for the saturated value,
388 // if the phase wishes to widen the new_type.
389 // If the phase is narrowing, the old type provides a lower limit.
390 // Caller guarantees that old_type and new_type are no higher than limit_type.
391 virtual const Type* saturate(const Type* new_type,
392 const Type* old_type,
393 const Type* limit_type) const {
394 return new_type;
395 }
396 virtual const Type* saturate_and_maybe_push_to_igvn_worklist(const TypeNode* n, const Type* new_type) {
397 return saturate(new_type, type_or_null(n), n->type());
398 }
399
400 #ifndef PRODUCT
401 uint _count_new_values; // For profiling, count new values produced
402 void inc_new_values() { ++_count_new_values; }
403 void clear_new_values() { _count_new_values = 0; }
404 uint made_new_values() const { return _count_new_values; }
405 #endif
406 };
407
408
409 //------------------------------PhaseGVN---------------------------------------
410 // Phase for performing local, pessimistic GVN-style optimizations.
411 class PhaseGVN : public PhaseValues {
412 protected:
413 bool is_dominator_helper(Node *d, Node *n, bool linear_only);
414
415 public:
416 // Return a node which computes the same function as this node, but
417 // in a faster or cheaper fashion.
418 Node* transform(Node* n);
419
420 virtual void record_for_igvn(Node *n) {
421 C->record_for_igvn(n);
422 }
423
424 bool is_dominator(Node *d, Node *n) { return is_dominator_helper(d, n, true); }
425
426 // Helper to call Node::Ideal() and BarrierSetC2::ideal_node().
427 Node* apply_ideal(Node* i, bool can_reshape);
428
429 #ifdef ASSERT
430 void dump_infinite_loop_info(Node* n, const char* where);
431 // Check for a simple dead loop when a data node references itself.
432 void dead_loop_check(Node *n);
433 #endif
434 };
435
436 //------------------------------PhaseIterGVN-----------------------------------
// Phase for iteratively performing local, pessimistic GVN-style optimizations
// and ideal transformations on the graph.
439 class PhaseIterGVN : public PhaseGVN {
440 private:
441 bool _delay_transform; // When true simply register the node when calling transform
442 // instead of actually optimizing it
443
444 // Idealize old Node 'n' with respect to its inputs and its value
445 virtual Node *transform_old( Node *a_node );
446
447 // Subsume users of node 'old' into node 'nn'
448 void subsume_node( Node *old, Node *nn );
449
450 protected:
451 // Shuffle worklist, for stress testing
452 void shuffle_worklist();
453
454 virtual const Type* saturate(const Type* new_type, const Type* old_type,
455 const Type* limit_type) const;
456 // Usually returns new_type. Returns old_type if new_type is only a slight
457 // improvement, such that it would take many (>>10) steps to reach 2**32.
458
459 public:
460
461 PhaseIterGVN(PhaseIterGVN* igvn); // Used by CCP constructor
462 PhaseIterGVN();
463
  // Reset IGVN: call destructor, and placement new.
465 void reset() {
466 this->~PhaseIterGVN();
467 ::new (static_cast<void*>(this)) PhaseIterGVN();
468 }
469
  // Reset IGVN with another: call destructor, and placement new.
471 // Achieves the same as the following (but without move constructors):
472 // igvn = PhaseIterGVN(other);
473 void reset_from_igvn(PhaseIterGVN* other) {
474 if (this != other) {
475 this->~PhaseIterGVN();
476 ::new (static_cast<void*>(this)) PhaseIterGVN(other);
477 }
478 }
479
480 // Idealize new Node 'n' with respect to its inputs and its value
481 virtual Node *transform( Node *a_node );
482 virtual void record_for_igvn(Node *n) { _worklist.push(n); }
483
484 // Iterative worklist. Reference to "C->igvn_worklist()".
485 Unique_Node_List &_worklist;
486
487 // Given def-use info and an initial worklist, apply Node::Ideal,
488 // Node::Value, Node::Identity, hash-based value numbering, Node::Ideal_DU
489 // and dominator info to a fixed point.
490 void optimize();
491 #ifdef ASSERT
492 void verify_optimize();
493 bool verify_Value_for(Node* n);
494 bool verify_Ideal_for(Node* n, bool can_reshape);
495 bool verify_Identity_for(Node* n);
496 void verify_empty_worklist(Node* n);
497 #endif
498
499 #ifndef PRODUCT
500 void trace_PhaseIterGVN(Node* n, Node* nn, const Type* old_type);
501 void init_verifyPhaseIterGVN();
502 void verify_PhaseIterGVN();
503 #endif
504
505 #ifdef ASSERT
506 void dump_infinite_loop_info(Node* n, const char* where);
507 void trace_PhaseIterGVN_verbose(Node* n, int num_processed);
508 #endif
509
510 // Register a new node with the iter GVN pass without transforming it.
511 // Used when we need to restructure a Region/Phi area and all the Regions
512 // and Phis need to complete this one big transform before any other
513 // transforms can be triggered on the region.
514 // Optional 'orig' is an earlier version of this node.
515 // It is significant only for debugging and profiling.
516 Node* register_new_node_with_optimizer(Node* n, Node* orig = nullptr);
517
518 // Kill a globally dead Node. All uses are also globally dead and are
519 // aggressively trimmed.
520 void remove_globally_dead_node( Node *dead );
521
522 // Kill all inputs to a dead node, recursively making more dead nodes.
523 // The Node must be dead locally, i.e., have no uses.
524 void remove_dead_node( Node *dead ) {
525 assert(dead->outcnt() == 0 && !dead->is_top(), "node must be dead");
526 remove_globally_dead_node(dead);
527 }
528
529 // Add users of 'n' to worklist
530 static void add_users_to_worklist0(Node* n, Unique_Node_List& worklist);
531 static void add_users_of_use_to_worklist(Node* n, Node* use, Unique_Node_List& worklist);
532 void add_users_to_worklist(Node* n);
533
534 // Replace old node with new one.
535 void replace_node( Node *old, Node *nn ) {
536 add_users_to_worklist(old);
537 hash_delete(old); // Yank from hash before hacking edges
538 subsume_node(old, nn);
539 }
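  // e.g. (illustrative): an Ideal() rule found that 'n' always computes the same value
  // as its first input:
  //   igvn->replace_node(n, n->in(1));  // users of 'n' now use n->in(1); 'n' becomes dead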
540
541 void replace_in_uses(Node* n, Node* m);
542
  // Delayed node rehash: remove a node from the hash table and rehash it during
  // the next optimizing pass
545 void rehash_node_delayed(Node* n) {
546 hash_delete(n);
547 _worklist.push(n);
548 }
549
550 // Replace ith edge of "n" with "in"
551 void replace_input_of(Node* n, uint i, Node* in) {
552 rehash_node_delayed(n);
553 n->set_req_X(i, in, this);
554 }
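  // e.g. (illustrative): igvn->replace_input_of(use, 1, new_in) is equivalent to
  // rehash_node_delayed(use) followed by use->set_req_X(1, new_in, igvn).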
555
556 // Add "in" as input (req) of "n"
557 void add_input_to(Node* n, Node* in) {
558 rehash_node_delayed(n);
559 n->add_req(in);
560 }
561
562 // Delete ith edge of "n"
563 void delete_input_of(Node* n, uint i) {
564 rehash_node_delayed(n);
565 n->del_req(i);
566 }
567
568 // Delete precedence edge i of "n"
569 void delete_precedence_of(Node* n, uint i) {
570 rehash_node_delayed(n);
571 n->rm_prec(i);
572 }
573
574 bool delay_transform() const { return _delay_transform; }
575
576 void set_delay_transform(bool delay) {
577 _delay_transform = delay;
578 }
579
580 void remove_speculative_types();
581 void check_no_speculative_types() {
582 _table.check_no_speculative_types();
583 }
584
585 bool is_dominator(Node *d, Node *n) { return is_dominator_helper(d, n, false); }
586 bool no_dependent_zero_check(Node* n) const;
587
588 #ifndef PRODUCT
589 static bool is_verify_def_use() {
590 // '-XX:VerifyIterativeGVN=1'
591 return (VerifyIterativeGVN % 10) == 1;
592 }
593 static bool is_verify_Value() {
594 // '-XX:VerifyIterativeGVN=10'
595 return ((VerifyIterativeGVN % 100) / 10) == 1;
596 }
597 static bool is_verify_Ideal() {
598 // '-XX:VerifyIterativeGVN=100'
599 return ((VerifyIterativeGVN % 1000) / 100) == 1;
600 }
601 static bool is_verify_Identity() {
602 // '-XX:VerifyIterativeGVN=1000'
603 return ((VerifyIterativeGVN % 10000) / 1000) == 1;
604 }
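  // The value is decoded digit by digit, so modes combine; e.g. (illustrative):
  // '-XX:VerifyIterativeGVN=1110' enables the Value, Ideal and Identity checks while
  // leaving the use-def check (ones digit) disabled.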
605 protected:
606 // Sub-quadratic implementation of '-XX:VerifyIterativeGVN=1' (Use-Def verification).
607 julong _verify_counter;
608 julong _verify_full_passes;
609 enum { _verify_window_size = 30 };
610 Node* _verify_window[_verify_window_size];
611 void verify_step(Node* n);
612 #endif
613 };
614
615 //------------------------------PhaseCCP---------------------------------------
616 // Phase for performing global Conditional Constant Propagation.
617 // Should be replaced with combined CCP & GVN someday.
618 class PhaseCCP : public PhaseIterGVN {
619 Unique_Node_List _root_and_safepoints;
620 Unique_Node_List _maybe_top_type_nodes;
  // Non-recursive. Uses analysis to transform a single Node.
622 virtual Node* transform_once(Node* n);
623
624 Node* fetch_next_node(Unique_Node_List& worklist);
625 static void dump_type_and_node(const Node* n, const Type* t) PRODUCT_RETURN;
626
627 void push_child_nodes_to_worklist(Unique_Node_List& worklist, Node* n) const;
628 void push_if_not_bottom_type(Unique_Node_List& worklist, Node* n) const;
629 void push_more_uses(Unique_Node_List& worklist, Node* parent, const Node* use) const;
630 void push_phis(Unique_Node_List& worklist, const Node* use) const;
631 static void push_catch(Unique_Node_List& worklist, const Node* use);
632 void push_cmpu(Unique_Node_List& worklist, const Node* use) const;
633 static void push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use);
634 static void push_cast(Unique_Node_List& worklist, const Node* use);
635 void push_loadp(Unique_Node_List& worklist, const Node* use) const;
636 static void push_load_barrier(Unique_Node_List& worklist, const BarrierSetC2* barrier_set, const Node* use);
637 void push_and(Unique_Node_List& worklist, const Node* parent, const Node* use) const;
638 void push_cast_ii(Unique_Node_List& worklist, const Node* parent, const Node* use) const;
639 void push_opaque_zero_trip_guard(Unique_Node_List& worklist, const Node* use) const;
640 void push_bool_with_cmpu_and_mask(Unique_Node_List& worklist, const Node* use) const;
641 void push_bool_matching_case1b(Unique_Node_List& worklist, const Node* cmpu) const;
642
643 public:
644 PhaseCCP( PhaseIterGVN *igvn ); // Compute conditional constants
645 NOT_PRODUCT( ~PhaseCCP(); )
646
647 // Worklist algorithm identifies constants
648 void analyze();
649 #ifdef ASSERT
650 void verify_type(Node* n, const Type* tnew, const Type* told);
651 // For every node n on verify list, check if type(n) == n->Value()
652 void verify_analyze(Unique_Node_List& worklist_verify);
653 #endif
  // Recursive traversal of the program. Uses analysis to modify the program.
655 virtual Node *transform( Node *n );
656 // Do any transformation after analysis
657 void do_transform();
658
659 virtual const Type* saturate(const Type* new_type, const Type* old_type,
660 const Type* limit_type) const;
661 // Returns new_type->widen(old_type), which increments the widen bits until
662 // giving up with TypeInt::INT or TypeLong::LONG.
663 // Result is clipped to limit_type if necessary.
664 virtual const Type* saturate_and_maybe_push_to_igvn_worklist(const TypeNode* n, const Type* new_type) {
665 const Type* t = saturate(new_type, type_or_null(n), n->type());
666 if (t != new_type) {
667 // Type was widened in CCP, but IGVN may be able to make it narrower.
668 _worklist.push((Node*)n);
669 }
670 return t;
671 }
672
673 #ifndef PRODUCT
674 static uint _total_invokes; // For profiling, count invocations
675 void inc_invokes() { ++PhaseCCP::_total_invokes; }
676
677 static uint _total_constants; // For profiling, count constants found
678 uint _count_constants;
679 void clear_constants() { _count_constants = 0; }
680 void inc_constants() { ++_count_constants; }
681 uint count_constants() const { return _count_constants; }
682
683 static void print_statistics();
684 #endif
685 };
686
687
688 //------------------------------PhasePeephole----------------------------------
// Phase for performing peephole optimizations on register-allocated basic blocks.
690 class PhasePeephole : public PhaseTransform {
691 PhaseRegAlloc *_regalloc;
692 PhaseCFG &_cfg;
  // Recursive traversal of the program. The pure virtual transform() is unused in this phase.
694 virtual Node *transform( Node *n );
695
696 public:
697 PhasePeephole( PhaseRegAlloc *regalloc, PhaseCFG &cfg );
698 NOT_PRODUCT( ~PhasePeephole(); )
699
700 // Do any transformation after analysis
701 void do_transform();
702
703 #ifndef PRODUCT
704 static uint _total_peepholes; // For profiling, count peephole rules applied
705 uint _count_peepholes;
706 void clear_peepholes() { _count_peepholes = 0; }
707 void inc_peepholes() { ++_count_peepholes; }
708 uint count_peepholes() const { return _count_peepholes; }
709
710 static void print_statistics();
711 #endif
712 };
713
714 #endif // SHARE_OPTO_PHASEX_HPP