1 /*
2 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "gc/shared/barrierSet.hpp"
26 #include "gc/shared/c2/barrierSetC2.hpp"
27 #include "memory/allocation.inline.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "opto/addnode.hpp"
30 #include "opto/block.hpp"
31 #include "opto/callnode.hpp"
32 #include "opto/castnode.hpp"
33 #include "opto/cfgnode.hpp"
34 #include "opto/convertnode.hpp"
35 #include "opto/idealGraphPrinter.hpp"
36 #include "opto/loopnode.hpp"
37 #include "opto/machnode.hpp"
38 #include "opto/opcodes.hpp"
39 #include "opto/phaseX.hpp"
40 #include "opto/regalloc.hpp"
41 #include "opto/rootnode.hpp"
42 #include "utilities/macros.hpp"
43 #include "utilities/powerOfTwo.hpp"
44
45 //=============================================================================
46 #define NODE_HASH_MINIMUM_SIZE 255
47
48 //------------------------------NodeHash---------------------------------------
49 NodeHash::NodeHash(Arena *arena, uint est_max_size) :
50 _a(arena),
51 _max( round_up(est_max_size < NODE_HASH_MINIMUM_SIZE ? NODE_HASH_MINIMUM_SIZE : est_max_size) ),
52 _inserts(0), _insert_limit( insert_limit() ),
53 _table( NEW_ARENA_ARRAY( _a , Node* , _max ) )
54 #ifndef PRODUCT
55 , _grows(0),_look_probes(0), _lookup_hits(0), _lookup_misses(0),
56 _insert_probes(0), _delete_probes(0), _delete_hits(0), _delete_misses(0),
57 _total_inserts(0), _total_insert_probes(0)
58 #endif
59 {
60 // _sentinel must be in the current node space
61 _sentinel = new ProjNode(nullptr, TypeFunc::Control);
62 memset(_table,0,sizeof(Node*)*_max);
63 }
64
65 //------------------------------hash_find--------------------------------------
66 // Find in hash table
67 Node *NodeHash::hash_find( const Node *n ) {
68 // ((Node*)n)->set_hash( n->hash() );
69 uint hash = n->hash();
70 if (hash == Node::NO_HASH) {
71 NOT_PRODUCT( _lookup_misses++ );
72 return nullptr;
73 }
74 uint key = hash & (_max-1);
75 uint stride = key | 0x01;
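  // _max is a power of two, so masking with (_max-1) picks the home slot, and
  // forcing the stride to be odd makes it relatively prime to _max: the probe
  // sequence (key, key+stride, key+2*stride, ...) mod _max therefore visits
  // every slot before repeating.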
76 NOT_PRODUCT( _look_probes++ );
77 Node *k = _table[key]; // Get hashed value
78 if( !k ) { // ?Miss?
79 NOT_PRODUCT( _lookup_misses++ );
80 return nullptr; // Miss!
81 }
82
83 int op = n->Opcode();
84 uint req = n->req();
85 while( 1 ) { // While probing hash table
86 if( k->req() == req && // Same count of inputs
87 k->Opcode() == op ) { // Same Opcode
88 for( uint i=0; i<req; i++ )
89 if( n->in(i)!=k->in(i)) // Different inputs?
90 goto collision; // "goto" is a speed hack...
91 if( n->cmp(*k) ) { // Check for any special bits
92 NOT_PRODUCT( _lookup_hits++ );
93 return k; // Hit!
94 }
95 }
96 collision:
97 NOT_PRODUCT( _look_probes++ );
98 key = (key + stride/*7*/) & (_max-1); // Stride through table with relative prime
99 k = _table[key]; // Get hashed value
100 if( !k ) { // ?Miss?
101 NOT_PRODUCT( _lookup_misses++ );
102 return nullptr; // Miss!
103 }
104 }
105 ShouldNotReachHere();
106 return nullptr;
107 }
108
109 //------------------------------hash_find_insert-------------------------------
110 // Find in hash table, insert if not already present
111 // Used to preserve unique entries in hash table
112 Node *NodeHash::hash_find_insert( Node *n ) {
113 // n->set_hash( );
114 uint hash = n->hash();
115 if (hash == Node::NO_HASH) {
116 NOT_PRODUCT( _lookup_misses++ );
117 return nullptr;
118 }
119 uint key = hash & (_max-1);
  uint stride = key | 0x01;                // stride must be relatively prime to table size
121 uint first_sentinel = 0; // replace a sentinel if seen.
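  // A value of 0 in first_sentinel means "no reusable sentinel slot recorded
  // yet"; a sentinel that happens to sit in slot 0 is therefore never reused,
  // which merely wastes that slot until the next grow().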
122 NOT_PRODUCT( _look_probes++ );
123 Node *k = _table[key]; // Get hashed value
124 if( !k ) { // ?Miss?
125 NOT_PRODUCT( _lookup_misses++ );
126 _table[key] = n; // Insert into table!
127 DEBUG_ONLY(n->enter_hash_lock()); // Lock down the node while in the table.
128 check_grow(); // Grow table if insert hit limit
129 return nullptr; // Miss!
130 }
131 else if( k == _sentinel ) {
132 first_sentinel = key; // Can insert here
133 }
134
135 int op = n->Opcode();
136 uint req = n->req();
137 while( 1 ) { // While probing hash table
138 if( k->req() == req && // Same count of inputs
139 k->Opcode() == op ) { // Same Opcode
140 for( uint i=0; i<req; i++ )
141 if( n->in(i)!=k->in(i)) // Different inputs?
142 goto collision; // "goto" is a speed hack...
143 if( n->cmp(*k) ) { // Check for any special bits
144 NOT_PRODUCT( _lookup_hits++ );
145 return k; // Hit!
146 }
147 }
148 collision:
149 NOT_PRODUCT( _look_probes++ );
150 key = (key + stride) & (_max-1); // Stride through table w/ relative prime
151 k = _table[key]; // Get hashed value
152 if( !k ) { // ?Miss?
153 NOT_PRODUCT( _lookup_misses++ );
154 key = (first_sentinel == 0) ? key : first_sentinel; // ?saw sentinel?
155 _table[key] = n; // Insert into table!
156 DEBUG_ONLY(n->enter_hash_lock()); // Lock down the node while in the table.
157 check_grow(); // Grow table if insert hit limit
158 return nullptr; // Miss!
159 }
160 else if( first_sentinel == 0 && k == _sentinel ) {
161 first_sentinel = key; // Can insert here
162 }
163
164 }
165 ShouldNotReachHere();
166 return nullptr;
167 }
168
169 //------------------------------hash_insert------------------------------------
170 // Insert into hash table
171 void NodeHash::hash_insert( Node *n ) {
172 // // "conflict" comments -- print nodes that conflict
173 // bool conflict = false;
174 // n->set_hash();
175 uint hash = n->hash();
176 if (hash == Node::NO_HASH) {
177 return;
178 }
179 check_grow();
180 uint key = hash & (_max-1);
181 uint stride = key | 0x01;
182
183 while( 1 ) { // While probing hash table
184 NOT_PRODUCT( _insert_probes++ );
185 Node *k = _table[key]; // Get hashed value
186 if( !k || (k == _sentinel) ) break; // Found a slot
187 assert( k != n, "already inserted" );
188 // if( PrintCompilation && PrintOptoStatistics && Verbose ) { tty->print(" conflict: "); k->dump(); conflict = true; }
189 key = (key + stride) & (_max-1); // Stride through table w/ relative prime
190 }
191 _table[key] = n; // Insert into table!
192 DEBUG_ONLY(n->enter_hash_lock()); // Lock down the node while in the table.
193 // if( conflict ) { n->dump(); }
194 }
195
196 //------------------------------hash_delete------------------------------------
197 // Replace in hash table with sentinel
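// The slot is overwritten with the _sentinel tombstone rather than null so that
// entries further along the same probe chain stay reachable; hash_find_insert
// may later reuse the slot, and grow() drops the tombstones entirely.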
198 bool NodeHash::hash_delete( const Node *n ) {
199 Node *k;
200 uint hash = n->hash();
201 if (hash == Node::NO_HASH) {
202 NOT_PRODUCT( _delete_misses++ );
203 return false;
204 }
205 uint key = hash & (_max-1);
206 uint stride = key | 0x01;
207 DEBUG_ONLY( uint counter = 0; );
208 for( ; /* (k != nullptr) && (k != _sentinel) */; ) {
209 DEBUG_ONLY( counter++ );
210 NOT_PRODUCT( _delete_probes++ );
211 k = _table[key]; // Get hashed value
212 if( !k ) { // Miss?
213 NOT_PRODUCT( _delete_misses++ );
214 return false; // Miss! Not in chain
215 }
216 else if( n == k ) {
217 NOT_PRODUCT( _delete_hits++ );
218 _table[key] = _sentinel; // Hit! Label as deleted entry
219 DEBUG_ONLY(((Node*)n)->exit_hash_lock()); // Unlock the node upon removal from table.
220 return true;
221 }
222 else {
223 // collision: move through table with prime offset
224 key = (key + stride/*7*/) & (_max-1);
225 assert( counter <= _insert_limit, "Cycle in hash-table");
226 }
227 }
228 ShouldNotReachHere();
229 return false;
230 }
231
232 //------------------------------round_up---------------------------------------
// Add 25% slop and round up to the nearest power of 2 (minimum 16)
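// For example, round_up(300) adds 25% slop (375) and rounds up to 512; the
// result is never below 16.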
234 uint NodeHash::round_up(uint x) {
235 x += (x >> 2); // Add 25% slop
236 return MAX2(16U, round_up_power_of_2(x));
237 }
238
239 //------------------------------grow-------------------------------------------
240 // Grow _table to next power of 2 and insert old entries
241 void NodeHash::grow() {
242 // Record old state
243 uint old_max = _max;
244 Node **old_table = _table;
245 // Construct new table with twice the space
246 #ifndef PRODUCT
247 _grows++;
248 _total_inserts += _inserts;
249 _total_insert_probes += _insert_probes;
250 _insert_probes = 0;
251 #endif
252 _inserts = 0;
253 _max = _max << 1;
254 _table = NEW_ARENA_ARRAY( _a , Node* , _max ); // (Node**)_a->Amalloc( _max * sizeof(Node*) );
255 memset(_table,0,sizeof(Node*)*_max);
256 _insert_limit = insert_limit();
257 // Insert old entries into the new table
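  // Sentinel tombstones left behind by hash_delete() are dropped here, which
  // reclaims their slots.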
258 for( uint i = 0; i < old_max; i++ ) {
259 Node *m = *old_table++;
260 if( !m || m == _sentinel ) continue;
261 DEBUG_ONLY(m->exit_hash_lock()); // Unlock the node upon removal from old table.
262 hash_insert(m);
263 }
264 }
265
266 //------------------------------clear------------------------------------------
267 // Clear all entries in _table to null but keep storage
268 void NodeHash::clear() {
269 #ifdef ASSERT
270 // Unlock all nodes upon removal from table.
271 for (uint i = 0; i < _max; i++) {
272 Node* n = _table[i];
273 if (!n || n == _sentinel) continue;
274 n->exit_hash_lock();
275 }
276 #endif
277
278 memset( _table, 0, _max * sizeof(Node*) );
279 }
280
281 //-----------------------remove_useless_nodes----------------------------------
// Remove useless nodes from the value table;
// the implementation does not depend on the hash function.
284 void NodeHash::remove_useless_nodes(VectorSet &useful) {
285
  // Dead nodes in the hash table inherited from GVN should not replace
  // existing nodes, so remove them here.
288 uint max = size();
289 Node *sentinel_node = sentinel();
290 for( uint i = 0; i < max; ++i ) {
291 Node *n = at(i);
292 if(n != nullptr && n != sentinel_node && !useful.test(n->_idx)) {
293 DEBUG_ONLY(n->exit_hash_lock()); // Unlock the node when removed
294 _table[i] = sentinel_node; // Replace with placeholder
295 }
296 }
297 }
298
299
300 void NodeHash::check_no_speculative_types() {
301 #ifdef ASSERT
302 uint max = size();
303 Unique_Node_List live_nodes;
304 Compile::current()->identify_useful_nodes(live_nodes);
305 Node *sentinel_node = sentinel();
306 for (uint i = 0; i < max; ++i) {
307 Node *n = at(i);
308 if (n != nullptr &&
309 n != sentinel_node &&
310 n->is_Type() &&
311 live_nodes.member(n)) {
312 TypeNode* tn = n->as_Type();
313 const Type* t = tn->type();
314 const Type* t_no_spec = t->remove_speculative();
315 assert(t == t_no_spec, "dead node in hash table or missed node during speculative cleanup");
316 }
317 }
318 #endif
319 }
320
321 #ifndef PRODUCT
322 //------------------------------dump-------------------------------------------
323 // Dump statistics for the hash table
324 void NodeHash::dump() {
325 _total_inserts += _inserts;
326 _total_insert_probes += _insert_probes;
327 if (PrintCompilation && PrintOptoStatistics && Verbose && (_inserts > 0)) {
328 if (WizardMode) {
329 for (uint i=0; i<_max; i++) {
330 if (_table[i])
331 tty->print("%d/%d/%d ",i,_table[i]->hash()&(_max-1),_table[i]->_idx);
332 }
333 }
334 tty->print("\nGVN Hash stats: %d grows to %d max_size\n", _grows, _max);
335 tty->print(" %d/%d (%8.1f%% full)\n", _inserts, _max, (double)_inserts/_max*100.0);
336 tty->print(" %dp/(%dh+%dm) (%8.2f probes/lookup)\n", _look_probes, _lookup_hits, _lookup_misses, (double)_look_probes/(_lookup_hits+_lookup_misses));
337 tty->print(" %dp/%di (%8.2f probes/insert)\n", _total_insert_probes, _total_inserts, (double)_total_insert_probes/_total_inserts);
338 // sentinels increase lookup cost, but not insert cost
339 assert((_lookup_misses+_lookup_hits)*4+100 >= _look_probes, "bad hash function");
340 assert( _inserts+(_inserts>>3) < _max, "table too full" );
341 assert( _inserts*3+100 >= _insert_probes, "bad hash function" );
342 }
343 }
344
345 Node *NodeHash::find_index(uint idx) { // For debugging
346 // Find an entry by its index value
347 for( uint i = 0; i < _max; i++ ) {
348 Node *m = _table[i];
349 if( !m || m == _sentinel ) continue;
350 if( m->_idx == (uint)idx ) return m;
351 }
352 return nullptr;
353 }
354 #endif
355
356 #ifdef ASSERT
357 NodeHash::~NodeHash() {
358 // Unlock all nodes upon destruction of table.
359 if (_table != (Node**)badAddress) clear();
360 }
361 #endif
362
363 // Add users of 'n' that match 'predicate' to worklist
364 template <class Predicate>
365 static void add_users_to_worklist_if(Unique_Node_List& worklist, const Node* n, Predicate predicate) {
366 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
367 Node* u = n->fast_out(i);
368 if (predicate(u)) {
369 worklist.push(u);
370 }
371 }
372 }
373
374 //=============================================================================
375 //------------------------------PhaseRemoveUseless-----------------------------
// 1) Use a breadth-first walk to collect useful nodes reachable from root.
377 PhaseRemoveUseless::PhaseRemoveUseless(PhaseGVN* gvn, Unique_Node_List& worklist, PhaseNumber phase_num) : Phase(phase_num) {
378 C->print_method(PHASE_BEFORE_REMOVEUSELESS, 3);
379 // Implementation requires an edge from root to each SafePointNode
380 // at a backward branch. Inserted in add_safepoint().
381
  // Identify nodes that are reachable from below (i.e., useful).
383 C->identify_useful_nodes(_useful);
384 // Update dead node list
385 C->update_dead_node_list(_useful);
386
387 // Remove all useless nodes from PhaseValues' recorded types
388 // Must be done before disconnecting nodes to preserve hash-table-invariant
389 gvn->remove_useless_nodes(_useful.member_set());
390
391 // Remove all useless nodes from future worklist
392 worklist.remove_useless_nodes(_useful.member_set());
393
394 // Disconnect 'useless' nodes that are adjacent to useful nodes
395 C->disconnect_useless_nodes(_useful, worklist);
396 }
397
398 //=============================================================================
399 //------------------------------PhaseRenumberLive------------------------------
400 // First, remove useless nodes (equivalent to identifying live nodes).
401 // Then, renumber live nodes.
402 //
403 // The set of live nodes is returned by PhaseRemoveUseless in the _useful structure.
404 // If the number of live nodes is 'x' (where 'x' == _useful.size()), then the
405 // PhaseRenumberLive updates the node ID of each node (the _idx field) with a unique
406 // value in the range [0, x).
407 //
408 // At the end of the PhaseRenumberLive phase, the compiler's count of unique nodes is
409 // updated to 'x' and the list of dead nodes is reset (as there are no dead nodes).
410 //
411 // The PhaseRenumberLive phase updates two data structures with the new node IDs.
// (1) The "worklist" is "C->igvn_worklist()", which collects the nodes that need to
//     be processed by IGVN after removal of the useless nodes.
414 // (2) Type information "gvn->types()" (same as "C->types()") maps every node ID to
415 // the node's type. The mapping is updated to use the new node IDs as well. We
416 // create a new map, and swap it with the old one.
417 //
418 // Other data structures used by the compiler are not updated. The hash table for value
419 // numbering ("C->node_hash()", referenced by PhaseValue::_table) is not updated because
420 // computing the hash values is not based on node IDs.
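//
// For example, if _useful holds nodes with old _idx values {17, 3, 42} in that
// iteration order, they are renumbered to 0, 1 and 2, and _old2new_map records
// 17->0, 3->1 and 42->2 so that IDs embedded inside nodes can be remapped
// afterwards (see update_embedded_ids).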
421 PhaseRenumberLive::PhaseRenumberLive(PhaseGVN* gvn,
422 Unique_Node_List& worklist,
423 PhaseNumber phase_num) :
424 PhaseRemoveUseless(gvn, worklist, Remove_Useless_And_Renumber_Live),
425 _new_type_array(C->comp_arena()),
426 _old2new_map(C->unique(), C->unique(), -1),
427 _is_pass_finished(false),
428 _live_node_count(C->live_nodes())
429 {
430 assert(RenumberLiveNodes, "RenumberLiveNodes must be set to true for node renumbering to take place");
431 assert(C->live_nodes() == _useful.size(), "the number of live nodes must match the number of useful nodes");
432 assert(_delayed.size() == 0, "should be empty");
433 assert(&worklist == C->igvn_worklist(), "reference still same as the one from Compile");
434 assert(&gvn->types() == C->types(), "reference still same as that from Compile");
435
436 GrowableArray<Node_Notes*>* old_node_note_array = C->node_note_array();
437 if (old_node_note_array != nullptr) {
438 int new_size = (_useful.size() >> 8) + 1; // The node note array uses blocks, see C->_log2_node_notes_block_size
439 new_size = MAX2(8, new_size);
440 C->set_node_note_array(new (C->comp_arena()) GrowableArray<Node_Notes*> (C->comp_arena(), new_size, 0, nullptr));
441 C->grow_node_notes(C->node_note_array(), new_size);
442 }
443
444 assert(worklist.is_subset_of(_useful), "only useful nodes should still be in the worklist");
445
446 // Iterate over the set of live nodes.
447 for (uint current_idx = 0; current_idx < _useful.size(); current_idx++) {
448 Node* n = _useful.at(current_idx);
449
450 const Type* type = gvn->type_or_null(n);
451 _new_type_array.map(current_idx, type);
452
453 assert(_old2new_map.at(n->_idx) == -1, "already seen");
454 _old2new_map.at_put(n->_idx, current_idx);
455
456 if (old_node_note_array != nullptr) {
457 Node_Notes* nn = C->locate_node_notes(old_node_note_array, n->_idx);
458 C->set_node_notes_at(current_idx, nn);
459 }
460
461 n->set_idx(current_idx); // Update node ID.
462
463 if (update_embedded_ids(n) < 0) {
464 _delayed.push(n); // has embedded IDs; handle later
465 }
466 }
467
  // VectorSet in Unique_Node_List must be recomputed, since IDs have changed.
469 worklist.recompute_idx_set();
470
471 assert(_live_node_count == _useful.size(), "all live nodes must be processed");
472
473 _is_pass_finished = true; // pass finished; safe to process delayed updates
474
475 while (_delayed.size() > 0) {
476 Node* n = _delayed.pop();
477 int no_of_updates = update_embedded_ids(n);
478 assert(no_of_updates > 0, "should be updated");
479 }
480
481 // Replace the compiler's type information with the updated type information.
482 gvn->types().swap(_new_type_array);
483
484 // Update the unique node count of the compilation to the number of currently live nodes.
485 C->set_unique(_live_node_count);
486
487 // Set the dead node count to 0 and reset dead node list.
488 C->reset_dead_node_list();
489 }
490
491 int PhaseRenumberLive::new_index(int old_idx) {
492 assert(_is_pass_finished, "not finished");
493 if (_old2new_map.at(old_idx) == -1) { // absent
494 // Allocate a placeholder to preserve uniqueness
495 _old2new_map.at_put(old_idx, _live_node_count);
496 _live_node_count++;
497 }
498 return _old2new_map.at(old_idx);
499 }
500
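// Remap node IDs that are embedded inside a node rather than expressed as edges:
// the per-instance IDs of Phi nodes (_inst_id, _inst_mem_id) and the instance_id
// of known-instance oop pointer types. Such IDs may refer to nodes that have not
// been renumbered yet, so the update is delayed (by returning -1) until the
// renumbering pass has finished.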
501 int PhaseRenumberLive::update_embedded_ids(Node* n) {
502 int no_of_updates = 0;
503 if (n->is_Phi()) {
504 PhiNode* phi = n->as_Phi();
505 if (phi->_inst_id != -1) {
506 if (!_is_pass_finished) {
507 return -1; // delay
508 }
509 int new_idx = new_index(phi->_inst_id);
510 assert(new_idx != -1, "");
511 phi->_inst_id = new_idx;
512 no_of_updates++;
513 }
514 if (phi->_inst_mem_id != -1) {
515 if (!_is_pass_finished) {
516 return -1; // delay
517 }
518 int new_idx = new_index(phi->_inst_mem_id);
519 assert(new_idx != -1, "");
520 phi->_inst_mem_id = new_idx;
521 no_of_updates++;
522 }
523 }
524
525 const Type* type = _new_type_array.fast_lookup(n->_idx);
526 if (type != nullptr && type->isa_oopptr() && type->is_oopptr()->is_known_instance()) {
527 if (!_is_pass_finished) {
528 return -1; // delay
529 }
530 int old_idx = type->is_oopptr()->instance_id();
531 int new_idx = new_index(old_idx);
532 const Type* new_type = type->is_oopptr()->with_instance_id(new_idx);
533 _new_type_array.map(n->_idx, new_type);
534 no_of_updates++;
535 }
536
537 return no_of_updates;
538 }
539
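// Clear the small-constant caches used by intcon(), longcon() and zerocon().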
540 void PhaseValues::init_con_caches() {
541 memset(_icons,0,sizeof(_icons));
542 memset(_lcons,0,sizeof(_lcons));
543 memset(_zcons,0,sizeof(_zcons));
544 }
545
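// PhaseCCP is a subclass of PhaseIterGVN, so both phase kinds can safely be
// returned as a PhaseIterGVN*.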
546 PhaseIterGVN* PhaseValues::is_IterGVN() {
547 return (_phase == PhaseValuesType::iter_gvn || _phase == PhaseValuesType::ccp) ? static_cast<PhaseIterGVN*>(this) : nullptr;
548 }
549
550 //--------------------------------find_int_type--------------------------------
551 const TypeInt* PhaseValues::find_int_type(Node* n) {
552 if (n == nullptr) return nullptr;
  // Call type_or_null(n) to determine the node's type, since we might be in the
  // parse phase, where calling n->Value() may return the wrong type.
555 // (For example, a phi node at the beginning of loop parsing is not ready.)
556 const Type* t = type_or_null(n);
557 if (t == nullptr) return nullptr;
558 return t->isa_int();
559 }
560
561
562 //-------------------------------find_long_type--------------------------------
563 const TypeLong* PhaseValues::find_long_type(Node* n) {
564 if (n == nullptr) return nullptr;
565 // (See comment above on type_or_null.)
566 const Type* t = type_or_null(n);
567 if (t == nullptr) return nullptr;
568 return t->isa_long();
569 }
570
571 //------------------------------~PhaseValues-----------------------------------
572 #ifndef PRODUCT
573 PhaseValues::~PhaseValues() {
574 // Statistics for NodeHash
575 _table.dump();
576 // Statistics for value progress and efficiency
577 if( PrintCompilation && Verbose && WizardMode ) {
578 tty->print("\n%sValues: %d nodes ---> " UINT64_FORMAT "/%d (%d)",
579 is_IterGVN() ? "Iter" : " ", C->unique(), made_progress(), made_transforms(), made_new_values());
580 if( made_transforms() != 0 ) {
581 tty->print_cr(" ratio %f", made_progress()/(float)made_transforms() );
582 } else {
583 tty->cr();
584 }
585 }
586 }
587 #endif
588
589 //------------------------------makecon----------------------------------------
590 ConNode* PhaseValues::makecon(const Type* t) {
591 assert(t->singleton(), "must be a constant");
592 assert(!t->empty() || t == Type::TOP, "must not be vacuous range");
593 switch (t->base()) { // fast paths
594 case Type::Half:
595 case Type::Top: return (ConNode*) C->top();
596 case Type::Int: return intcon( t->is_int()->get_con() );
597 case Type::Long: return longcon( t->is_long()->get_con() );
598 default: break;
599 }
600 if (t->is_zero_type())
601 return zerocon(t->basic_type());
602 return uncached_makecon(t);
603 }
604
605 //--------------------------uncached_makecon-----------------------------------
606 // Make an idealized constant - one of ConINode, ConPNode, etc.
607 ConNode* PhaseValues::uncached_makecon(const Type *t) {
608 assert(t->singleton(), "must be a constant");
609 ConNode* x = ConNode::make(t);
610 ConNode* k = (ConNode*)hash_find_insert(x); // Value numbering
611 if (k == nullptr) {
612 set_type(x, t); // Missed, provide type mapping
613 GrowableArray<Node_Notes*>* nna = C->node_note_array();
614 if (nna != nullptr) {
615 Node_Notes* loc = C->locate_node_notes(nna, x->_idx, true);
616 loc->clear(); // do not put debug info on constants
617 }
618 } else {
619 x->destruct(this); // Hit, destroy duplicate constant
620 x = k; // use existing constant
621 }
622 return x;
623 }
624
625 //------------------------------intcon-----------------------------------------
626 // Fast integer constant. Same as "transform(new ConINode(TypeInt::make(i)))"
627 ConINode* PhaseValues::intcon(jint i) {
628 // Small integer? Check cache! Check that cached node is not dead
629 if (i >= _icon_min && i <= _icon_max) {
630 ConINode* icon = _icons[i-_icon_min];
631 if (icon != nullptr && icon->in(TypeFunc::Control) != nullptr)
632 return icon;
633 }
634 ConINode* icon = (ConINode*) uncached_makecon(TypeInt::make(i));
635 assert(icon->is_Con(), "");
636 if (i >= _icon_min && i <= _icon_max)
637 _icons[i-_icon_min] = icon; // Cache small integers
638 return icon;
639 }
640
641 //------------------------------longcon----------------------------------------
642 // Fast long constant.
643 ConLNode* PhaseValues::longcon(jlong l) {
644 // Small integer? Check cache! Check that cached node is not dead
645 if (l >= _lcon_min && l <= _lcon_max) {
646 ConLNode* lcon = _lcons[l-_lcon_min];
647 if (lcon != nullptr && lcon->in(TypeFunc::Control) != nullptr)
648 return lcon;
649 }
650 ConLNode* lcon = (ConLNode*) uncached_makecon(TypeLong::make(l));
651 assert(lcon->is_Con(), "");
652 if (l >= _lcon_min && l <= _lcon_max)
653 _lcons[l-_lcon_min] = lcon; // Cache small integers
654 return lcon;
655 }
656 ConNode* PhaseValues::integercon(jlong l, BasicType bt) {
657 if (bt == T_INT) {
658 return intcon(checked_cast<jint>(l));
659 }
660 assert(bt == T_LONG, "not an integer");
661 return longcon(l);
662 }
663
664
665 //------------------------------zerocon-----------------------------------------
666 // Fast zero or null constant. Same as "transform(ConNode::make(Type::get_zero_type(bt)))"
667 ConNode* PhaseValues::zerocon(BasicType bt) {
668 assert((uint)bt <= _zcon_max, "domain check");
669 ConNode* zcon = _zcons[bt];
670 if (zcon != nullptr && zcon->in(TypeFunc::Control) != nullptr)
671 return zcon;
672 zcon = (ConNode*) uncached_makecon(Type::get_zero_type(bt));
673 _zcons[bt] = zcon;
674 return zcon;
675 }
676
677
678
679 //=============================================================================
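// Give the GC barrier set the first chance to idealize the node; fall back to
// the node's own Ideal() if it declines (returns null).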
680 Node* PhaseGVN::apply_ideal(Node* k, bool can_reshape) {
681 Node* i = BarrierSet::barrier_set()->barrier_set_c2()->ideal_node(this, k, can_reshape);
682 if (i == nullptr) {
683 i = k->Ideal(this, can_reshape);
684 }
685 return i;
686 }
687
688 //------------------------------transform--------------------------------------
689 // Return a node which computes the same function as this node, but
690 // in a faster or cheaper fashion.
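// The work proceeds in four steps: (1) apply Ideal repeatedly until it no longer
// rewrites the node, (2) compute Value and constant-fold if the resulting type
// is a singleton, (3) apply Identity to find a nearby equivalent node, and
// (4) value-number the node through the hash table, returning any pre-existing
// duplicate.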
691 Node* PhaseGVN::transform(Node* n) {
692 NOT_PRODUCT( set_transforms(); )
693
694 // Apply the Ideal call in a loop until it no longer applies
695 Node* k = n;
696 Node* i = apply_ideal(k, /*can_reshape=*/false);
697 NOT_PRODUCT(uint loop_count = 1;)
698 while (i != nullptr) {
699 assert(i->_idx >= k->_idx, "Idealize should return new nodes, use Identity to return old nodes" );
700 k = i;
701 #ifdef ASSERT
702 if (loop_count >= K + C->live_nodes()) {
703 dump_infinite_loop_info(i, "PhaseGVN::transform");
704 }
705 #endif
706 i = apply_ideal(k, /*can_reshape=*/false);
707 NOT_PRODUCT(loop_count++;)
708 }
709 NOT_PRODUCT(if (loop_count != 0) { set_progress(); })
710
711 // If brand new node, make space in type array.
712 ensure_type_or_null(k);
713
714 // Since I just called 'Value' to compute the set of run-time values
715 // for this Node, and 'Value' is non-local (and therefore expensive) I'll
716 // cache Value. Later requests for the local phase->type of this Node can
717 // use the cached Value instead of suffering with 'bottom_type'.
718 const Type* t = k->Value(this); // Get runtime Value set
719 assert(t != nullptr, "value sanity");
720 if (type_or_null(k) != t) {
721 #ifndef PRODUCT
722 // Do not count initial visit to node as a transformation
723 if (type_or_null(k) == nullptr) {
724 inc_new_values();
725 set_progress();
726 }
727 #endif
728 set_type(k, t);
729 // If k is a TypeNode, capture any more-precise type permanently into Node
730 k->raise_bottom_type(t);
731 }
732
733 if (t->singleton() && !k->is_Con()) {
734 set_progress();
735 return makecon(t); // Turn into a constant
736 }
737
738 // Now check for Identities
739 i = k->Identity(this); // Look for a nearby replacement
740 if (i != k) { // Found? Return replacement!
741 set_progress();
742 return i;
743 }
744
745 // Global Value Numbering
746 i = hash_find_insert(k); // Insert if new
747 if (i && (i != k)) {
748 // Return the pre-existing node
749 set_progress();
750 return i;
751 }
752
753 // Return Idealized original
754 return k;
755 }
756
757 bool PhaseGVN::is_dominator_helper(Node *d, Node *n, bool linear_only) {
758 if (d->is_top() || (d->is_Proj() && d->in(0)->is_top())) {
759 return false;
760 }
761 if (n->is_top() || (n->is_Proj() && n->in(0)->is_top())) {
762 return false;
763 }
764 assert(d->is_CFG() && n->is_CFG(), "must have CFG nodes");
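  // Walk up the dominator chain from n towards d, giving up conservatively
  // (returning false) if d is not reached within 100 steps.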
765 int i = 0;
766 while (d != n) {
767 n = IfNode::up_one_dom(n, linear_only);
768 i++;
769 if (n == nullptr || i >= 100) {
770 return false;
771 }
772 }
773 return true;
774 }
775
776 #ifdef ASSERT
777 //------------------------------dead_loop_check--------------------------------
778 // Check for a simple dead loop when a data node references itself directly
// or through another data node, excluding cons and phis.
780 void PhaseGVN::dead_loop_check(Node* n) {
781 // Phi may reference itself in a loop.
782 if (n == nullptr || n->is_dead_loop_safe() || n->is_CFG()) {
783 return;
784 }
785
786 // Do 2 levels check and only data inputs.
787 for (uint i = 1; i < n->req(); i++) {
788 Node* in = n->in(i);
789 if (in == n) {
790 n->dump_bfs(100, nullptr, "");
791 fatal("Dead loop detected, node references itself: %s (%d)",
792 n->Name(), n->_idx);
793 }
794
795 if (in == nullptr || in->is_dead_loop_safe()) {
796 continue;
797 }
798 for (uint j = 1; j < in->req(); j++) {
799 if (in->in(j) == n) {
800 n->dump_bfs(100, nullptr, "");
801 fatal("Dead loop detected, node input references current node: %s (%d) -> %s (%d)",
802 in->Name(), in->_idx, n->Name(), n->_idx);
803 }
804 if (in->in(j) == in) {
805 n->dump_bfs(100, nullptr, "");
806 fatal("Dead loop detected, node input references itself: %s (%d)",
807 in->Name(), in->_idx);
808 }
809 }
810 }
811 }
812
813
814 /**
815 * Dumps information that can help to debug the problem. A debug
816 * build fails with an assert.
817 */
818 void PhaseGVN::dump_infinite_loop_info(Node* n, const char* where) {
819 n->dump(4);
820 assert(false, "infinite loop in %s", where);
821 }
822 #endif
823
824 //=============================================================================
825 //------------------------------PhaseIterGVN-----------------------------------
826 // Initialize with previous PhaseIterGVN info; used by PhaseCCP
827 PhaseIterGVN::PhaseIterGVN(PhaseIterGVN* igvn) : _delay_transform(igvn->_delay_transform),
828 _worklist(*C->igvn_worklist())
829 {
830 _phase = PhaseValuesType::iter_gvn;
831 assert(&_worklist == &igvn->_worklist, "sanity");
832 }
833
834 //------------------------------PhaseIterGVN-----------------------------------
835 // Initialize from scratch
836 PhaseIterGVN::PhaseIterGVN() : _delay_transform(false),
837 _worklist(*C->igvn_worklist())
838 {
839 _phase = PhaseValuesType::iter_gvn;
840 uint max;
841
842 // Dead nodes in the hash table inherited from GVN were not treated as
843 // roots during def-use info creation; hence they represent an invisible
844 // use. Clear them out.
845 max = _table.size();
846 for( uint i = 0; i < max; ++i ) {
847 Node *n = _table.at(i);
848 if(n != nullptr && n != _table.sentinel() && n->outcnt() == 0) {
849 if( n->is_top() ) continue;
850 // If remove_useless_nodes() has run, we expect no such nodes left.
851 assert(false, "remove_useless_nodes missed this node");
852 hash_delete(n);
853 }
854 }
855
856 // Any Phis or Regions on the worklist probably had uses that could not
857 // make more progress because the uses were made while the Phis and Regions
858 // were in half-built states. Put all uses of Phis and Regions on worklist.
859 max = _worklist.size();
860 for( uint j = 0; j < max; j++ ) {
861 Node *n = _worklist.at(j);
862 uint uop = n->Opcode();
863 if( uop == Op_Phi || uop == Op_Region ||
864 n->is_Type() ||
865 n->is_Mem() )
866 add_users_to_worklist(n);
867 }
868 }
869
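// Fisher-Yates shuffle of the IGVN worklist, driven by the compilation's random
// seed; used under StressIGVN to perturb the order in which nodes are processed.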
870 void PhaseIterGVN::shuffle_worklist() {
871 if (_worklist.size() < 2) return;
872 for (uint i = _worklist.size() - 1; i >= 1; i--) {
873 uint j = C->random() % (i + 1);
874 swap(_worklist.adr()[i], _worklist.adr()[j]);
875 }
876 }
877
878 #ifndef PRODUCT
879 void PhaseIterGVN::verify_step(Node* n) {
880 if (is_verify_def_use()) {
881 ResourceMark rm;
882 VectorSet visited;
883 Node_List worklist;
884
885 _verify_window[_verify_counter % _verify_window_size] = n;
886 ++_verify_counter;
887 if (C->unique() < 1000 || 0 == _verify_counter % (C->unique() < 10000 ? 10 : 100)) {
888 ++_verify_full_passes;
889 worklist.push(C->root());
890 Node::verify(-1, visited, worklist);
891 return;
892 }
893 for (int i = 0; i < _verify_window_size; i++) {
894 Node* n = _verify_window[i];
895 if (n == nullptr) {
896 continue;
897 }
898 if (n->in(0) == NodeSentinel) { // xform_idom
899 _verify_window[i] = n->in(1);
900 --i;
901 continue;
902 }
903 // Typical fanout is 1-2, so this call visits about 6 nodes.
904 if (!visited.test_set(n->_idx)) {
905 worklist.push(n);
906 }
907 }
908 Node::verify(4, visited, worklist);
909 }
910 }
911
912 void PhaseIterGVN::trace_PhaseIterGVN(Node* n, Node* nn, const Type* oldtype, bool progress) {
913 const Type* newtype = type_or_null(n);
914 if (progress) {
915 C->print_method(PHASE_AFTER_ITER_GVN_STEP, 5, n);
916 }
917 if (TraceIterativeGVN) {
918 uint wlsize = _worklist.size();
919 if (nn != n) {
920 // print old node
921 tty->print("< ");
922 if (oldtype != newtype && oldtype != nullptr) {
923 oldtype->dump();
924 }
925 do { tty->print("\t"); } while (tty->position() < 16);
926 tty->print("<");
927 n->dump();
928 }
929 if (oldtype != newtype || nn != n) {
930 // print new node and/or new type
931 if (oldtype == nullptr) {
932 tty->print("* ");
933 } else if (nn != n) {
934 tty->print("> ");
935 } else {
936 tty->print("= ");
937 }
938 if (newtype == nullptr) {
939 tty->print("null");
940 } else {
941 newtype->dump();
942 }
943 do { tty->print("\t"); } while (tty->position() < 16);
944 nn->dump();
945 }
946 if (Verbose && wlsize < _worklist.size()) {
947 tty->print(" Push {");
948 while (wlsize != _worklist.size()) {
949 Node* pushed = _worklist.at(wlsize++);
950 tty->print(" %d", pushed->_idx);
951 }
952 tty->print_cr(" }");
953 }
954 if (nn != n) {
955 // ignore n, it might be subsumed
956 verify_step((Node*) nullptr);
957 }
958 }
959 }
960
961 void PhaseIterGVN::init_verifyPhaseIterGVN() {
962 _verify_counter = 0;
963 _verify_full_passes = 0;
964 for (int i = 0; i < _verify_window_size; i++) {
965 _verify_window[i] = nullptr;
966 }
967 #ifdef ASSERT
968 // Verify that all modified nodes are on _worklist
969 Unique_Node_List* modified_list = C->modified_nodes();
970 while (modified_list != nullptr && modified_list->size()) {
971 Node* n = modified_list->pop();
972 if (!n->is_Con() && !_worklist.member(n)) {
973 n->dump();
974 fatal("modified node is not on IGVN._worklist");
975 }
976 }
977 #endif
978 }
979
980 void PhaseIterGVN::verify_PhaseIterGVN(bool deep_revisit_converged) {
981 #ifdef ASSERT
982 // Verify nodes with changed inputs.
983 Unique_Node_List* modified_list = C->modified_nodes();
984 while (modified_list != nullptr && modified_list->size()) {
985 Node* n = modified_list->pop();
986 if (!n->is_Con()) { // skip Con nodes
987 n->dump();
988 fatal("modified node was not processed by IGVN.transform_old()");
989 }
990 }
991 #endif
992
993 C->verify_graph_edges();
994 if (is_verify_def_use() && PrintOpto) {
995 if (_verify_counter == _verify_full_passes) {
996 tty->print_cr("VerifyIterativeGVN: %d transforms and verify passes",
997 (int) _verify_full_passes);
998 } else {
999 tty->print_cr("VerifyIterativeGVN: %d transforms, %d full verify passes",
1000 (int) _verify_counter, (int) _verify_full_passes);
1001 }
1002 }
1003
1004 #ifdef ASSERT
1005 if (modified_list != nullptr) {
1006 while (modified_list->size() > 0) {
1007 Node* n = modified_list->pop();
1008 n->dump();
1009 assert(false, "VerifyIterativeGVN: new modified node was added");
1010 }
1011 }
1012
1013 verify_optimize(deep_revisit_converged);
1014 #endif
1015 }
1016 #endif /* PRODUCT */
1017
1018 #ifdef ASSERT
1019 /**
1020 * Dumps information that can help to debug the problem. A debug
1021 * build fails with an assert.
1022 */
1023 void PhaseIterGVN::dump_infinite_loop_info(Node* n, const char* where) {
1024 n->dump(4);
1025 _worklist.dump();
1026 assert(false, "infinite loop in %s", where);
1027 }
1028
1029 /**
1030 * Prints out information about IGVN if the 'verbose' option is used.
1031 */
1032 void PhaseIterGVN::trace_PhaseIterGVN_verbose(Node* n, int num_processed) {
1033 if (TraceIterativeGVN && Verbose) {
1034 tty->print(" Pop ");
1035 n->dump();
1036 if ((num_processed % 100) == 0) {
1037 _worklist.print_set();
1038 }
1039 }
1040 }
1041 #endif /* ASSERT */
1042
1043 bool PhaseIterGVN::needs_deep_revisit(const Node* n) const {
1044 // LoadNode::Value() -> can_see_stored_value() walks up through many memory
1045 // nodes. LoadNode::Ideal() -> find_previous_store() also walks up to 50
1046 // nodes through stores and arraycopy nodes.
1047 if (n->is_Load()) {
1048 return true;
1049 }
1050 // CmpPNode::sub() -> detect_ptr_independence() -> all_controls_dominate()
1051 // walks CFG dominator relationships extensively. This only triggers when
1052 // both inputs are oop pointers (subnode.cpp:984).
1053 if (n->Opcode() == Op_CmpP) {
1054 const Type* t1 = type_or_null(n->in(1));
1055 const Type* t2 = type_or_null(n->in(2));
1056 return t1 != nullptr && t1->isa_oopptr() &&
1057 t2 != nullptr && t2->isa_oopptr();
1058 }
1059 // IfNode::Ideal() -> search_identical() walks up the CFG dominator tree.
1060 // RangeCheckNode::Ideal() scans up to ~999 nodes up the chain.
1061 // CountedLoopEndNode/LongCountedLoopEndNode::Ideal() via simple_subsuming
1062 // looks for dominating test that subsumes the current test.
1063 switch (n->Opcode()) {
1064 case Op_If:
1065 case Op_RangeCheck:
1066 case Op_CountedLoopEnd:
1067 case Op_LongCountedLoopEnd:
1068 return true;
1069 default:
1070 break;
1071 }
1072 return false;
1073 }
1074
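// Process the IGVN worklist until it is empty. Returns true if we had to bail
// out (node budget exceeded or a suspected infinite loop), false once the
// worklist has drained normally.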
1075 bool PhaseIterGVN::drain_worklist() {
1076 uint loop_count = 1;
1077 const int max_live_nodes_increase_per_iteration = NodeLimitFudgeFactor * 5;
1078 while (_worklist.size() != 0) {
1079 if (C->check_node_count(max_live_nodes_increase_per_iteration, "Out of nodes")) {
1080 C->print_method(PHASE_AFTER_ITER_GVN, 3);
1081 return true;
1082 }
1083 Node* n = _worklist.pop();
1084 if (loop_count >= K * C->live_nodes()) {
1085 DEBUG_ONLY(dump_infinite_loop_info(n, "PhaseIterGVN::drain_worklist");)
1086 C->record_method_not_compilable("infinite loop in PhaseIterGVN::drain_worklist");
1087 C->print_method(PHASE_AFTER_ITER_GVN, 3);
1088 return true;
1089 }
1090 DEBUG_ONLY(trace_PhaseIterGVN_verbose(n, _num_processed++);)
1091 if (n->outcnt() != 0) {
1092 NOT_PRODUCT(const Type* oldtype = type_or_null(n));
1093 // Do the transformation
1094 DEBUG_ONLY(int live_nodes_before = C->live_nodes();)
1095 NOT_PRODUCT(uint progress_before = made_progress();)
1096 Node* nn = transform_old(n);
1097 NOT_PRODUCT(bool progress = (made_progress() - progress_before) > 0;)
1098 DEBUG_ONLY(int live_nodes_after = C->live_nodes();)
1099 // Ensure we did not increase the live node count with more than
1100 // max_live_nodes_increase_per_iteration during the call to transform_old.
1101 DEBUG_ONLY(int increase = live_nodes_after - live_nodes_before;)
1102 assert(increase < max_live_nodes_increase_per_iteration,
1103 "excessive live node increase in single iteration of IGVN: %d "
1104 "(should be at most %d)",
1105 increase, max_live_nodes_increase_per_iteration);
1106 NOT_PRODUCT(trace_PhaseIterGVN(n, nn, oldtype, progress);)
1107 } else if (!n->is_top()) {
1108 remove_dead_node(n, NodeOrigin::Graph);
1109 }
1110 loop_count++;
1111 }
1112 return false;
1113 }
1114
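// Walk the whole live graph from root and push every node that performs deep
// graph inspection (see needs_deep_revisit) back onto the IGVN worklist.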
1115 void PhaseIterGVN::push_deep_revisit_candidates() {
1116 ResourceMark rm;
1117 Unique_Node_List all_nodes;
1118 all_nodes.push(C->root());
1119 for (uint j = 0; j < all_nodes.size(); j++) {
1120 Node* n = all_nodes.at(j);
1121 if (needs_deep_revisit(n)) {
1122 _worklist.push(n);
1123 }
1124 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1125 all_nodes.push(n->fast_out(i));
1126 }
1127 }
1128 }
1129
1130 bool PhaseIterGVN::deep_revisit() {
1131 // Re-process nodes that inspect the graph deeply. After the main worklist drains, walk
1132 // the graph to find all live deep-inspection nodes and push them to the worklist
1133 // for re-evaluation. If any produce changes, drain the worklist again.
1134 // Repeat until stable. This mirrors PhaseCCP::analyze()'s revisit loop.
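  // The return value reports convergence: true if a fixed point was reached
  // within the round budget, false if we bailed out or ran out of rounds.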
1135 const uint max_deep_revisit_rounds = 10; // typically converges in <2 rounds
1136 uint round = 0;
1137 for (; round < max_deep_revisit_rounds; round++) {
1138 push_deep_revisit_candidates();
1139 if (_worklist.size() == 0) {
1140 break; // No deep-inspection nodes to revisit, done.
1141 }
1142
1143 #ifndef PRODUCT
1144 uint candidates = _worklist.size();
1145 uint n_if = 0; uint n_rc = 0; uint n_load = 0; uint n_cmpp = 0; uint n_cle = 0; uint n_lcle = 0;
1146 if (TraceIterativeGVN) {
1147 for (uint i = 0; i < _worklist.size(); i++) {
1148 Node* n = _worklist.at(i);
1149 switch (n->Opcode()) {
1150 case Op_If: n_if++; break;
1151 case Op_RangeCheck: n_rc++; break;
1152 case Op_CountedLoopEnd: n_cle++; break;
1153 case Op_LongCountedLoopEnd: n_lcle++; break;
1154 case Op_CmpP: n_cmpp++; break;
1155 default: if (n->is_Load()) n_load++; break;
1156 }
1157 }
1158 }
1159 #endif
1160
1161 // Convergence: if the drain does not make progress (no Ideal, Value, Identity or GVN changes),
1162 // we are at a fixed point. We use made_progress() rather than live_nodes because live_nodes
1163 // misses non-structural changes like a LoadNode dropping its control input.
1164 uint progress_before = made_progress();
1165 if (drain_worklist()) {
1166 return false;
1167 }
1168 uint progress = made_progress() - progress_before;
1169
1170 #ifndef PRODUCT
1171 if (TraceIterativeGVN) {
1172 tty->print("deep_revisit round %u: %u candidates (If=%u RC=%u Load=%u CmpP=%u CLE=%u LCLE=%u), progress=%u (%s)",
1173 round, candidates, n_if, n_rc, n_load, n_cmpp, n_cle, n_lcle, progress, progress != 0 ? "changed" : "converged");
1174 if (C->method() != nullptr) {
1175 tty->print(", ");
1176 C->method()->print_short_name(tty);
1177 }
1178 tty->cr();
1179 }
1180 #endif
1181
1182 if (progress == 0) {
1183 break;
1184 }
1185 }
1186 return round < max_deep_revisit_rounds;
1187 }
1188
1189 void PhaseIterGVN::optimize(bool deep) {
1190 bool deep_revisit_converged = false;
1191 DEBUG_ONLY(_num_processed = 0;)
1192 NOT_PRODUCT(init_verifyPhaseIterGVN();)
1193 NOT_PRODUCT(C->reset_igv_phase_iter(PHASE_AFTER_ITER_GVN_STEP);)
1194 C->print_method(PHASE_BEFORE_ITER_GVN, 3);
1195 if (StressIGVN) {
1196 shuffle_worklist();
1197 }
1198
1199 // Pull from worklist and transform the node.
1200 if (drain_worklist()) {
1201 return;
1202 }
1203
1204 if (deep && UseDeepIGVNRevisit) {
1205 deep_revisit_converged = deep_revisit();
1206 if (C->failing()) {
1207 return;
1208 }
1209 }
1210
1211 NOT_PRODUCT(verify_PhaseIterGVN(deep_revisit_converged);)
1212 C->print_method(PHASE_AFTER_ITER_GVN, 3);
1213 }
1214
1215 #ifdef ASSERT
1216 void PhaseIterGVN::verify_optimize(bool deep_revisit_converged) {
1217 assert(_worklist.size() == 0, "igvn worklist must be empty before verify");
1218
1219 if (is_verify_Value() ||
1220 is_verify_Ideal() ||
1221 is_verify_Identity() ||
1222 is_verify_invariants()) {
1223 ResourceMark rm;
1224 Unique_Node_List worklist;
1225 // BFS all nodes, starting at root
1226 worklist.push(C->root());
1227 for (uint j = 0; j < worklist.size(); ++j) {
1228 Node* n = worklist.at(j);
1229 // If we get an assert here, check why the reported node was not processed again in IGVN.
1230 // We should either make sure that this node is properly added back to the IGVN worklist
1231 // in PhaseIterGVN::add_users_to_worklist to update it again or add an exception
1232 // in the verification methods below if that is not possible for some reason (like Load nodes).
1233 if (is_verify_Value()) {
1234 verify_Value_for(n, deep_revisit_converged /* strict */);
1235 }
1236 if (is_verify_Ideal()) {
1237 verify_Ideal_for(n, false /* can_reshape */, deep_revisit_converged);
1238 verify_Ideal_for(n, true /* can_reshape */, deep_revisit_converged);
1239 }
1240 if (is_verify_Identity()) {
1241 verify_Identity_for(n);
1242 }
1243 if (is_verify_invariants()) {
1244 verify_node_invariants_for(n);
1245 }
1246
1247 // traverse all inputs and outputs
1248 for (uint i = 0; i < n->req(); i++) {
1249 if (n->in(i) != nullptr) {
1250 worklist.push(n->in(i));
1251 }
1252 }
1253 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1254 worklist.push(n->fast_out(i));
1255 }
1256 }
1257 }
1258
1259 verify_empty_worklist(nullptr);
1260 }
1261
1262 void PhaseIterGVN::verify_empty_worklist(Node* node) {
1263 // Verify that the igvn worklist is empty. If no optimization happened, then
1264 // nothing needs to be on the worklist.
1265 if (_worklist.size() == 0) { return; }
1266
1267 stringStream ss; // Print as a block without tty lock.
1268 for (uint j = 0; j < _worklist.size(); j++) {
1269 Node* n = _worklist.at(j);
1270 ss.print("igvn.worklist[%d] ", j);
1271 n->dump("\n", false, &ss);
1272 }
1273 if (_worklist.size() != 0 && node != nullptr) {
1274 ss.print_cr("Previously optimized:");
1275 node->dump("\n", false, &ss);
1276 }
1277 tty->print_cr("%s", ss.as_string());
1278 assert(false, "igvn worklist must still be empty after verify");
1279 }
1280
1281 // Check that type(n) == n->Value(), asserts if we have a failure.
1282 // We have a list of exceptions, see detailed comments in code.
1283 // (1) Integer "widen" changes, but the range is the same.
1284 // (2) LoadNode performs deep traversals. Load is not notified for changes far away.
1285 // (3) CmpPNode performs deep traversals if it compares oopptr. CmpP is not notified for changes far away.
1286 void PhaseIterGVN::verify_Value_for(const Node* n, bool strict) {
1287 // If we assert inside type(n), because the type is still a null, then maybe
1288 // the node never went through gvn.transform, which would be a bug.
1289 const Type* told = type(n);
1290 const Type* tnew = n->Value(this);
1291 if (told == tnew) {
1292 return;
1293 }
1294 // Exception (1)
1295 // Integer "widen" changes, but range is the same.
1296 if (told->isa_integer(tnew->basic_type()) != nullptr) { // both either int or long
1297 const TypeInteger* t0 = told->is_integer(tnew->basic_type());
1298 const TypeInteger* t1 = tnew->is_integer(tnew->basic_type());
1299 if (t0->lo_as_long() == t1->lo_as_long() &&
1300 t0->hi_as_long() == t1->hi_as_long()) {
1301 return; // ignore integer widen
1302 }
1303 }
1304 // Exception (2)
1305 // LoadNode performs deep traversals. Load is not notified for changes far away.
1306 if (!strict && n->is_Load() && !told->singleton()) {
1307 // MemNode::can_see_stored_value looks up through many memory nodes,
1308 // which means we would need to notify modifications from far up in
1309 // the inputs all the way down to the LoadNode. We don't do that.
1310 return;
1311 }
1312 // Exception (3)
1313 // CmpPNode performs deep traversals if it compares oopptr. CmpP is not notified for changes far away.
1314 if (!strict && n->Opcode() == Op_CmpP && type(n->in(1))->isa_oopptr() && type(n->in(2))->isa_oopptr()) {
1315 // SubNode::Value
1316 // CmpPNode::sub
1317 // MemNode::detect_ptr_independence
1318 // MemNode::all_controls_dominate
1319 // We find all controls of a pointer load, and see if they dominate the control of
1320 // an allocation. If they all dominate, we know the allocation is after (independent)
1321 // of the pointer load, and we can say the pointers are different. For this we call
1322 // n->dominates(sub, nlist) to check if controls n of the pointer load dominate the
    // control sub of the allocation. The problem is that sometimes dominates answers
    // false conservatively, and later it can determine that it is indeed true. Loops with
    // Region heads can lead to giving up, whereas LoopNodes can be skipped more easily, and
    // so the traversal becomes more powerful. This is difficult to remedy; we would have
1327 // to notify the CmpP of CFG updates. Luckily, we recompute CmpP::Value during CCP
1328 // after loop-opts, so that should take care of many of these cases.
1329 return;
1330 }
1331
1332 stringStream ss; // Print as a block without tty lock.
1333 ss.cr();
1334 ss.print_cr("Missed Value optimization:");
1335 n->dump_bfs(3, nullptr, "", &ss);
1336 ss.print_cr("Current type:");
1337 told->dump_on(&ss);
1338 ss.cr();
1339 ss.print_cr("Optimized type:");
1340 tnew->dump_on(&ss);
1341 ss.cr();
1342 tty->print_cr("%s", ss.as_string());
1343
1344 switch (_phase) {
1345 case PhaseValuesType::iter_gvn:
1346 assert(false, "Missed Value optimization opportunity in PhaseIterGVN for %s",n->Name());
1347 break;
1348 case PhaseValuesType::ccp:
1349 assert(false, "PhaseCCP not at fixpoint: analysis result may be unsound for %s", n->Name());
1350 break;
1351 default:
1352 assert(false, "Unexpected phase");
1353 break;
1354 }
1355 }
1356
1357 // Check that all Ideal optimizations that could be done were done.
1358 // Asserts if it found missed optimization opportunities or encountered unexpected changes, and
1359 // returns normally otherwise (no missed optimization, or skipped verification).
1360 void PhaseIterGVN::verify_Ideal_for(Node* n, bool can_reshape, bool deep_revisit_converged) {
1361 if (!deep_revisit_converged && needs_deep_revisit(n)) {
1362 return;
1363 }
1364
1365 // First, we check a list of exceptions, where we skip verification,
1366 // because there are known cases where Ideal can optimize after IGVN.
1367 // Some may be expected and cannot be fixed, and others should be fixed.
1368 switch (n->Opcode()) {
1369 // RegionNode::Ideal does "Skip around the useless IF diamond".
1370 // 245 IfTrue === 244
1371 // 258 If === 245 257
1372 // 259 IfTrue === 258 [[ 263 ]]
1373 // 260 IfFalse === 258 [[ 263 ]]
1374 // 263 Region === 263 260 259 [[ 263 268 ]]
1375 // to
1376 // 245 IfTrue === 244
1377 // 263 Region === 263 245 _ [[ 263 268 ]]
1378 //
1379 // "Useless" means that there is no code in either branch of the If.
1380 // I found a case where this was not done yet during IGVN.
1381 // Why does the Region not get added to IGVN worklist when the If diamond becomes useless?
1382 //
1383 // Found with:
1384 // java -XX:VerifyIterativeGVN=0100 -Xcomp --version
1385 case Op_Region:
1386 return;
1387
1388 // In AddNode::Ideal, we call "commute", which swaps the inputs so
1389 // that smaller idx are first. Tracking it back, it led me to
1390 // PhaseIdealLoop::remix_address_expressions which swapped the edges.
1391 //
1392 // Example:
1393 // Before PhaseIdealLoop::remix_address_expressions
1394 // 154 AddI === _ 12 144
1395 // After PhaseIdealLoop::remix_address_expressions
1396 // 154 AddI === _ 144 12
1397 // After AddNode::Ideal
1398 // 154 AddI === _ 12 144
1399 //
1400 // I suspect that the node should be added to the IGVN worklist after
1401 // PhaseIdealLoop::remix_address_expressions
1402 //
1403 // This is the only case I looked at, there may be others. Found like this:
1404 // java -XX:VerifyIterativeGVN=0100 -Xbatch --version
1405 //
1406 // The following hit the same logic in PhaseIdealLoop::remix_address_expressions.
1407 //
1408 // Note: currently all of these fail also for other reasons, for example
1409 // because of "commute" doing the reordering with the phi below. Once
1410 // that is resolved, we can come back to this issue here.
1411 //
1412 // case Op_AddD:
1413 // case Op_AddI:
1414 // case Op_AddL:
1415 // case Op_AddF:
1416 // case Op_MulI:
1417 // case Op_MulL:
1418 // case Op_MulF:
1419 // case Op_MulD:
1420 // if (n->in(1)->_idx > n->in(2)->_idx) {
1421 // // Expect "commute" to revert this case.
1422 // return false;
1423 // }
1424 // break; // keep verifying
1425
1426 // AddFNode::Ideal calls "commute", which can reorder the inputs for this:
1427 // Check for tight loop increments: Loop-phi of Add of loop-phi
1428 // It wants to take the phi into in(1):
1429 // 471 Phi === 435 38 390
1430 // 390 AddF === _ 471 391
1431 //
1432 // Other Associative operators are also affected equally.
1433 //
1434 // Investigate why this does not happen earlier during IGVN.
1435 //
1436 // Found with:
1437 // test/hotspot/jtreg/compiler/loopopts/superword/ReductionPerf.java
1438 // -XX:VerifyIterativeGVN=1110
1439 case Op_AddD:
1440 //case Op_AddI: // Also affected for other reasons, see case further down.
1441 //case Op_AddL: // Also affected for other reasons, see case further down.
1442 case Op_AddF:
1443 case Op_MulI:
1444 case Op_MulL:
1445 case Op_MulF:
1446 case Op_MulD:
1447 case Op_MinF:
1448 case Op_MinD:
1449 case Op_MaxF:
1450 case Op_MaxD:
1451 // XorINode::Ideal
1452 // Found with:
1453 // compiler/intrinsics/chacha/TestChaCha20.java
1454 // -XX:VerifyIterativeGVN=1110
1455 case Op_XorI:
1456 case Op_XorL:
1457 // It seems we may have similar issues with the HF cases.
1458 // Found with aarch64:
1459 // compiler/vectorization/TestFloat16VectorOperations.java
1460 // -XX:VerifyIterativeGVN=1110
1461 case Op_AddHF:
1462 case Op_MulHF:
1463 case Op_MaxHF:
1464 case Op_MinHF:
1465 return;
1466
1467 // In MulNode::Ideal the edges can be swapped to help value numbering:
1468 //
1469 // // We are OK if right is a constant, or right is a load and
1470 // // left is a non-constant.
1471 // if( !(t2->singleton() ||
1472 // (in(2)->is_Load() && !(t1->singleton() || in(1)->is_Load())) ) ) {
1473 // if( t1->singleton() || // Left input is a constant?
1474 // // Otherwise, sort inputs (commutativity) to help value numbering.
1475 // (in(1)->_idx > in(2)->_idx) ) {
1476 // swap_edges(1, 2);
1477 //
1478 // Why was this not done earlier during IGVN?
1479 //
1480 // Found with:
1481 // test/hotspot/jtreg/gc/stress/gcbasher/TestGCBasherWithG1.java
1482 // -XX:VerifyIterativeGVN=1110
1483 case Op_AndI:
1484 // Same for AndL.
1485 // Found with:
1486 // compiler/intrinsics/bigInteger/MontgomeryMultiplyTest.java
1487 // -XX:VerifyIterativeGVN=1110
1488 case Op_AndL:
1489 return;
1490
1491 // SubLNode::Ideal does transform like:
1492 // Convert "c1 - (y+c0)" into "(c1-c0) - y"
1493 //
1494 // In IGVN before verification:
1495 // 8423 ConvI2L === _ 3519 [[ 8424 ]] #long:-2
1496 // 8422 ConvI2L === _ 8399 [[ 8424 ]] #long:3..256:www
1497 // 8424 AddL === _ 8422 8423 [[ 8383 ]] !orig=[8382]
1498 // 8016 ConL === 0 [[ 8383 ]] #long:0
1499 // 8383 SubL === _ 8016 8424 [[ 8156 ]] !orig=[8154]
1500 //
1501 // And then in verification:
1502 // 8338 ConL === 0 [[ 8339 8424 ]] #long:-2 <----- Was constant folded.
1503 // 8422 ConvI2L === _ 8399 [[ 8424 ]] #long:3..256:www
1504 // 8424 AddL === _ 8422 8338 [[ 8383 ]] !orig=[8382]
1505 // 8016 ConL === 0 [[ 8383 ]] #long:0
1506 // 8383 SubL === _ 8016 8424 [[ 8156 ]] !orig=[8154]
1507 //
1508 // So the form changed from:
1509 // c1 - (y + [8423 ConvI2L])
1510 // to
1511 // c1 - (y + -2)
1512 // but the SubL was not added to the IGVN worklist. Investigate why.
1513 // There could be other issues too.
1514 //
1515 // There seems to be a related AddL IGVN optimization that triggers
1516 // the same SubL optimization, so investigate that too.
1517 //
1518 // Found with:
1519 // java -XX:VerifyIterativeGVN=0100 -Xcomp --version
1520 case Op_SubL:
1521 return;
1522
1523 // SubINode::Ideal does
1524 // Convert "x - (y+c0)" into "(x-y) - c0" AND
1525 // Convert "c1 - (y+c0)" into "(c1-c0) - y"
1526 //
1527 // Investigate why this does not yet happen during IGVN.
1528 //
1529 // Found with:
1530 // test/hotspot/jtreg/compiler/c2/IVTest.java
1531 // -XX:VerifyIterativeGVN=1110
1532 case Op_SubI:
1533 return;
1534
1535 // AddNode::IdealIL does transform like:
1536 // Convert x + (con - y) into "(x - y) + con"
1537 //
1538 // In IGVN before verification:
1539 // 8382 ConvI2L
1540 // 8381 ConvI2L === _ 791 [[ 8383 ]] #long:0
1541 // 8383 SubL === _ 8381 8382
1542 // 8168 ConvI2L
1543 // 8156 AddL === _ 8168 8383 [[ 8158 ]]
1544 //
1545 // And then in verification:
1546 // 8424 AddL
1547 // 8016 ConL === 0 [[ 8383 ]] #long:0 <--- Was constant folded.
1548 // 8383 SubL === _ 8016 8424
1549 // 8168 ConvI2L
1550 // 8156 AddL === _ 8168 8383 [[ 8158 ]]
1551 //
1552 // So the form changed from:
1553 // x + (ConvI2L(0) - [8382 ConvI2L])
1554 // to
1555 // x + (0 - [8424 AddL])
1556 // but the AddL was not added to the IGVN worklist. Investigate why.
1557 // There could be other issues, too. For example with "commute", see above.
1558 //
1559 // Found with:
1560 // java -XX:VerifyIterativeGVN=0100 -Xcomp --version
1561 case Op_AddL:
1562 return;
1563
1564 // SubTypeCheckNode::Ideal calls SubTypeCheckNode::verify_helper, which does
1565 // Node* cmp = phase->transform(new CmpPNode(subklass, in(SuperKlass)));
1566 // record_for_cleanup(cmp, phase);
1567 // This verification code in the Ideal code creates new nodes, and checks
1568 // if they fold in unexpected ways. This means some nodes are created and
1569 // added to the worklist, even if the SubTypeCheck is not optimized. This
    // goes against the assumption of the verification here, which assumes that
1571 // if the node is not optimized, then no new nodes should be created, and
1572 // also no nodes should be added to the worklist.
1573 // I see two options:
1574 // 1) forbid what verify_helper does, because for each Ideal call it
1575 // uses memory and that is suboptimal. But it is not clear how that
1576 // verification can be done otherwise.
1577 // 2) Special case the verification here. Probably the new nodes that
1578 // were just created are dead, i.e. they are not connected down to
1579 // root. We could verify that, and remove those nodes from the graph
1580 // by setting all their inputs to nullptr. And of course we would
1581 // have to remove those nodes from the worklist.
1582 // Maybe there are other options too, I did not dig much deeper yet.
1583 //
1584 // Found with:
1585 // java -XX:VerifyIterativeGVN=0100 -Xbatch --version
1586 case Op_SubTypeCheck:
1587 return;
1588
1589 // LoopLimitNode::Ideal when stride is constant power-of-2, we can do a lowering
1590 // to other nodes: Conv, Add, Sub, Mul, And ...
1591 //
1592 // 107 ConI === 0 [[ ... ]] #int:2
1593 // 84 LoadRange === _ 7 83
1594 // 50 ConI === 0 [[ ... ]] #int:0
1595 // 549 LoopLimit === _ 50 84 107
1596 //
    // I stepped backward to see how the node was generated, and found that it was
    // created in PhaseIdealLoop::exact_limit and not changed since. It is added to the
    // IGVN worklist. I quickly checked when it goes into LoopLimitNode::Ideal after
    // that, and it seems we want to skip lowering it until after loop-opts, but we
    // never call record_for_post_loop_opts_igvn. This would be an easy fix, but there
1602 // could be other issues too.
1603 //
    // Found with:
1605 // java -XX:VerifyIterativeGVN=0100 -Xcomp --version
1606 case Op_LoopLimit:
1607 return;
1608
1609 // PhiNode::Ideal calls split_flow_path, which tries to do this:
1610 // "This optimization tries to find two or more inputs of phi with the same constant
1611 // value. It then splits them into a separate Phi, and according Region."
1612 //
1613 // Example:
1614 // 130 DecodeN === _ 129
1615 // 50 ConP === 0 [[ 18 91 99 18 ]] #null
1616 // 18 Phi === 14 50 130 50 [[ 133 ]] #java/lang/Object * Oop:java/lang/Object *
1617 //
1618 // turns into:
1619 //
1620 // 50 ConP === 0 [[ 99 91 18 ]] #null
1621 // 130 DecodeN === _ 129 [[ 18 ]]
1622 // 18 Phi === 14 130 50 [[ 133 ]] #java/lang/Object * Oop:java/lang/Object *
1623 //
1624 // We would have to investigate why this optimization does not happen during IGVN.
1625 // There could also be other issues - I did not investigate further yet.
1626 //
1627 // Found with:
1628 // java -XX:VerifyIterativeGVN=0100 -Xcomp --version
1629 case Op_Phi:
1630 return;
1631
1632 // MemBarNode::Ideal does "Eliminate volatile MemBars for scalar replaced objects".
    // For example "The allocated object does not escape".
1634 //
    // It seems the difference from earlier calls to MemBarNode::Ideal is that back
    // then alloc->as_Allocate()->does_not_escape_thread() returned false, but in
    // verification it returned true. Why does the MemBarStoreStore not get added to the IGVN
1638 // worklist when this change happens?
1639 //
1640 // Found with:
1641 // java -XX:VerifyIterativeGVN=0100 -Xcomp --version
1642 case Op_MemBarStoreStore:
1643 return;
1644
1645 // ConvI2LNode::Ideal converts
1646 // 648 AddI === _ 583 645 [[ 661 ]]
1647 // 661 ConvI2L === _ 648 [[ 664 ]] #long:0..maxint-1:www
1648 // into
1649 // 772 ConvI2L === _ 645 [[ 773 ]] #long:-120..maxint-61:www
1650 // 771 ConvI2L === _ 583 [[ 773 ]] #long:60..120:www
1651 // 773 AddL === _ 771 772 [[ ]]
1652 //
1653 // We have to investigate why this does not happen during IGVN in this case.
1654 // There could also be other issues - I did not investigate further yet.
1655 //
1656 // Found with:
1657 // java -XX:VerifyIterativeGVN=0100 -Xcomp --version
1658 case Op_ConvI2L:
1659 return;
1660
1661 // AddNode::IdealIL can do this transform (and similar other ones):
    //   Convert "a*b+a*c" into "a*(b+c)"
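    //   As a worked example (a sketch with made-up constants): x*3 + x*5  ==>  x*(3 + 5)  ==>  x*8.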
1663 // The example had AddI(MulI(a, b), MulI(a, c)). Why did this not happen
1664 // during IGVN? There was a mutation for one of the MulI, and only
1665 // after that the pattern was as needed for the optimization. The MulI
1666 // was added to the IGVN worklist, but not the AddI. This probably
1667 // can be fixed by adding the correct pattern in add_users_of_use_to_worklist.
1668 //
1669 // Found with:
1670 // test/hotspot/jtreg/compiler/loopopts/superword/ReductionPerf.java
1671 // -XX:VerifyIterativeGVN=1110
1672 case Op_AddI:
1673 return;
1674
1675 // ArrayCopyNode::Ideal
1676 // calls ArrayCopyNode::prepare_array_copy
    //     calls Compile::conv_I2X_index -> is called with sizetype = intcon(0), which I
    //                                      think is not expected, and we create a range int:0..-1
1679 // calls Compile::constrained_convI2L -> creates ConvI2L(intcon(1), int:0..-1)
1680 // note: the type is already empty!
1681 // calls PhaseIterGVN::transform
1682 // calls PhaseIterGVN::transform_old
1683 // calls PhaseIterGVN::subsume_node -> subsume ConvI2L with TOP
1684 // calls Unique_Node_List::push -> pushes TOP to worklist
1685 //
1686 // Once we get back to ArrayCopyNode::prepare_array_copy, we get back TOP, and
1687 // return false. This means we eventually return nullptr from ArrayCopyNode::Ideal.
1688 //
1689 // Question: is it ok to push anything to the worklist during ::Ideal, if we will
1690 // return nullptr, indicating nothing happened?
    //            Is it smart to do the transform in Compile::constrained_convI2L, and then
    //            check for TOP in ArrayCopyNode::prepare_array_copy?
1693 // Should we just allow TOP to land on the worklist, as an exception?
1694 //
1695 // Found with:
1696 // compiler/arraycopy/TestArrayCopyAsLoadsStores.java
1697 // -XX:VerifyIterativeGVN=1110
1698 case Op_ArrayCopy:
1699 return;
1700
1701 // CastLLNode::Ideal
1702 // calls ConstraintCastNode::optimize_integer_cast -> pushes CastLL through SubL
1703 //
    // Could be a notification issue, where updates to the inputs of the CastLL do
    // not notify down through the SubL to the CastLL.
1706 //
1707 // Found With:
1708 // compiler/c2/TestMergeStoresMemorySegment.java#byte-array
1709 // -XX:VerifyIterativeGVN=1110
1710 case Op_CastLL:
1711 return;
1712
1713 // Similar case happens to CastII
1714 //
1715 // Found With:
1716 // compiler/c2/TestScalarReplacementMaxLiveNodes.java
1717 // -XX:VerifyIterativeGVN=1110
1718 case Op_CastII:
1719 return;
1720
1721 // MaxLNode::Ideal
1722 // calls AddNode::Ideal
1723 // calls commute -> decides to swap edges
1724 //
1725 // Another notification issue, because we check inputs of inputs?
1726 // MaxL -> Phi -> Loop
1727 // MaxL -> Phi -> MaxL
1728 //
1729 // Found with:
1730 // compiler/c2/irTests/TestIfMinMax.java
1731 // -XX:VerifyIterativeGVN=1110
1732 case Op_MaxL:
1733 case Op_MinL:
1734 return;
1735
1736 // OrINode::Ideal
1737 // calls AddNode::Ideal
1738 // calls commute -> left is Load, right not -> commute.
1739 //
    // Not sure why notification does not work here; it seems like
    // the depth is only 1, so it should work. Needs investigation.
1742 //
1743 // Found with:
1744 // compiler/codegen/TestCharVect2.java#id0
1745 // -XX:VerifyIterativeGVN=1110
1746 case Op_OrI:
1747 case Op_OrL:
1748 return;
1749
1750 // Bool -> constant folded to 1.
1751 // Issue with notification?
1752 //
1753 // Found with:
1754 // compiler/c2/irTests/TestVectorizationMismatchedAccess.java
1755 // -XX:VerifyIterativeGVN=1110
1756 case Op_Bool:
1757 return;
1758
1759 // LShiftLNode::Ideal
1760 // Looks at pattern: "(x + x) << c0", converts it to "x << (c0 + 1)"
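    //   As a worked example (a sketch): with c0 = 3,
    //     (x + x) << 3  ==  (2*x) * 8  ==  x * 16  ==  x << 4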
1761 // Probably a notification issue.
1762 //
1763 // Found with:
1764 // compiler/conversions/TestMoveConvI2LOrCastIIThruAddIs.java
1765 // -ea -esa -XX:CompileThreshold=100 -XX:+UnlockExperimentalVMOptions -server -XX:-TieredCompilation -XX:+IgnoreUnrecognizedVMOptions -XX:VerifyIterativeGVN=1110
1766 case Op_LShiftL:
1767 return;
1768
1769 // LShiftINode::Ideal
    //   pattern: ((x + con1) << con2) -> (x << con2) + (con1 << con2)
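    //   As a worked example (a sketch): with con1 = 7 and con2 = 2,
    //     (x + 7) << 2  ==>  (x << 2) + 28    (since 7 << 2 == 28)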
1771 // Could be issue with notification of inputs of inputs
1772 //
1773 // Side-note: should cases like these not be shared between
1774 // LShiftI and LShiftL?
1775 //
1776 // Found with:
1777 // compiler/escapeAnalysis/Test6689060.java
1778 // -XX:+IgnoreUnrecognizedVMOptions -XX:VerifyIterativeGVN=1110 -ea -esa -XX:CompileThreshold=100 -XX:+UnlockExperimentalVMOptions -server -XX:-TieredCompilation -XX:+IgnoreUnrecognizedVMOptions -XX:VerifyIterativeGVN=1110
1779 case Op_LShiftI:
1780 return;
1781
1782 // AddPNode::Ideal seems to do set_req without removing lock first.
1783 // Found with various vector tests tier1-tier3.
1784 case Op_AddP:
1785 return;
1786
1787 // StrIndexOfNode::Ideal
1788 // Found in tier1-3.
1789 case Op_StrIndexOf:
1790 case Op_StrIndexOfChar:
1791 return;
1792
1793 // StrEqualsNode::Identity
1794 //
1795 // Found (linux x64 only?) with:
1796 // serviceability/sa/ClhsdbThreadContext.java
1797 // -XX:+UnlockExperimentalVMOptions -XX:LockingMode=1 -XX:+IgnoreUnrecognizedVMOptions -XX:VerifyIterativeGVN=1110
1798 // Note: The -XX:LockingMode option is not available anymore.
1799 case Op_StrEquals:
1800 return;
1801
1802 // AryEqNode::Ideal
1803 // Not investigated. Reshapes itself and adds lots of nodes to the worklist.
1804 //
1805 // Found with:
1806 // vmTestbase/vm/mlvm/meth/stress/compiler/i2c_c2i/Test.java
1807 // -XX:+UnlockDiagnosticVMOptions -XX:-TieredCompilation -XX:+StressUnstableIfTraps -XX:+IgnoreUnrecognizedVMOptions -XX:VerifyIterativeGVN=1110
1808 case Op_AryEq:
1809 return;
1810
1811 // MergeMemNode::Ideal
1812 // Found in tier1-3. Did not investigate further yet.
1813 case Op_MergeMem:
1814 return;
1815
1816 // CMoveINode::Ideal
1817 // Found in tier1-3. Did not investigate further yet.
1818 case Op_CMoveI:
1819 return;
1820
1821 // CmpPNode::Ideal calls isa_const_java_mirror
1822 // and generates new constant nodes, even if no progress is made.
1823 // We can probably rewrite this so that only types are generated.
    // It seems that object types are not hashed; we could investigate
    // whether that is an option as well.
1826 //
1827 // Found with:
1828 // java -XX:VerifyIterativeGVN=1110 -Xcomp --version
1829 case Op_CmpP:
1830 return;
1831
1832 // MinINode::Ideal
1833 // Did not investigate, but there are some patterns that might
1834 // need more notification.
1835 case Op_MinI:
    case Op_MaxI: // Excluded preemptively as well.
1837 return;
1838 }
1839
1840 if (n->is_Store()) {
1841 // StoreNode::Ideal can do this:
1842 // // Capture an unaliased, unconditional, simple store into an initializer.
1843 // // Or, if it is independent of the allocation, hoist it above the allocation.
1844 // That replaces the Store with a MergeMem.
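    //
    // A typical source shape that can produce this (a sketch, not the exact
    // failing case):
    //   int[] a = new int[10];
    //   a[0] = 42;   // this StoreI can be captured by the allocation's InitializeNode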
1845 //
1846 // We have to investigate why this does not happen during IGVN in this case.
1847 // There could also be other issues - I did not investigate further yet.
1848 //
1849 // Found with:
1850 // java -XX:VerifyIterativeGVN=0100 -Xcomp --version
1851 return;
1852 }
1853
1854 if (n->is_Vector()) {
1855 // VectorNode::Ideal swaps edges, but only for ops
1856 // that are deemed commutable. But swap_edges
1857 // requires the hash to be invariant when the edges
1858 // are swapped, which is not implemented for these
    // vector nodes. This usually seems not to cause any trouble, but we can
    // also get graphs where, in the end, the nodes are not all commuted, so
    // there is definitely an issue here.
1863 //
1864 // Probably we have two options: kill the hash, or
1865 // properly make the hash commutation friendly.
1866 //
1867 // Found with:
1868 // compiler/vectorapi/TestMaskedMacroLogicVector.java
1869 // -XX:+IgnoreUnrecognizedVMOptions -XX:VerifyIterativeGVN=1110 -XX:+UseParallelGC -XX:+UseNUMA
1870 return;
1871 }
1872
1873 if (n->is_Region()) {
1874 // LoopNode::Ideal calls RegionNode::Ideal.
1875 // CountedLoopNode::Ideal calls RegionNode::Ideal too.
    // But I got an issue because RegionNode::optimize_trichotomy
    // then modifies another node, and pushes nodes to the worklist.
    // Not sure if it is ok to modify another node like that.
    // Maybe it is; then we need to look into what to do with
    // the nodes that are now on the worklist, maybe just clear
    // them out again. But maybe modifying other nodes like that
1882 // is also bad design. In the end, we return nullptr for
1883 // the current CountedLoop. But the extra nodes on the worklist
1884 // trip the asserts later on.
1885 //
1886 // Found with:
1887 // compiler/eliminateAutobox/TestShortBoxing.java
1888 // -ea -esa -XX:CompileThreshold=100 -XX:+UnlockExperimentalVMOptions -server -XX:-TieredCompilation -XX:+IgnoreUnrecognizedVMOptions -XX:VerifyIterativeGVN=1110
1889 return;
1890 }
1891
1892 if (n->is_CallJava()) {
1893 // CallStaticJavaNode::Ideal
1894 // Led to a crash:
1895 // assert((is_CallStaticJava() && cg->is_mh_late_inline()) || (is_CallDynamicJava() && cg->is_virtual_late_inline())) failed: mismatch
1896 //
1897 // Did not investigate yet, could be a bug.
1898 // Or maybe it does not expect to be called during verification.
1899 //
1900 // Found with:
1901 // test/jdk/jdk/incubator/vector/VectorRuns.java
1902 // -XX:VerifyIterativeGVN=1110
1903
    // A further issue is with CallDynamicJavaNode::Ideal, and I think also with
    // CallStaticJavaNode::Ideal and possibly their subclasses.
1906 // During late inlining it can call CallJavaNode::register_for_late_inline
1907 // That means we do more rounds of late inlining, but might fail.
1908 // Then we do IGVN again, and register the node again for late inlining.
    // This creates an endless cycle. Every time we try late inlining, we
1910 // are also creating more nodes, especially SafePoint and MergeMem.
1911 // These nodes are immediately rejected when the inlining fails in the
1912 // do_late_inline_check, but they still grow the memory, until we hit
1913 // the MemLimit and crash.
    // The assumption here seems to be that CallDynamicJavaNode::Ideal does not get
    // called repeatedly, and that eventually we terminate. I fear this is not
1916 // a great assumption to make. We should investigate more.
1917 //
1918 // Found with:
1919 // compiler/loopopts/superword/TestDependencyOffsets.java#vanilla-U
1920 // -XX:+IgnoreUnrecognizedVMOptions -XX:VerifyIterativeGVN=1110
1921 return;
1922 }
1923
1924 // Ideal should not make progress if it returns nullptr.
1925 // We use made_progress() rather than unique() or live_nodes() because some
1926 // Ideal implementations speculatively create nodes and kill them before
1927 // returning nullptr (e.g. split_if clones a Cmp to check is_canonical).
1928 // unique() is a high-water mark that is not decremented by remove_dead_node,
1929 // so it would cause false-positives. live_nodes() accounts for dead nodes but can
1930 // decrease when Ideal removes existing nodes as side effects.
1931 // made_progress() precisely tracks meaningful transforms, and speculative
1932 // work killed via NodeOrigin::Speculative does not increment it.
1933 uint old_progress = made_progress();
1934 // The hash of a node should not change, this would indicate different inputs
1935 uint old_hash = n->hash();
1936 // Remove 'n' from hash table in case it gets modified. We want to avoid
1937 // hitting the "Need to remove from hash before changing edges" assert if
1938 // a change occurs. Instead, we would like to proceed with the optimization,
1939 // return and finally hit the assert in PhaseIterGVN::verify_optimize to get
1940 // a more meaningful message
1941 _table.hash_delete(n);
1942 Node* i = n->Ideal(this, can_reshape);
1943 // If there was no new Idealization, we are probably happy.
1944 if (i == nullptr) {
1945 uint progress = made_progress() - old_progress;
1946 if (progress != 0) {
1947 stringStream ss; // Print as a block without tty lock.
1948 ss.cr();
1949 ss.print_cr("Ideal optimization did not make progress but had side effects.");
1950 ss.print_cr(" %u transforms made progress", progress);
1951 n->dump_bfs(1, nullptr, "", &ss);
1952 tty->print_cr("%s", ss.as_string());
1953 assert(false, "Unexpected side effects from applying Ideal optimization on %s", n->Name());
1954 }
1955
1956 if (old_hash != n->hash()) {
1957 stringStream ss; // Print as a block without tty lock.
1958 ss.cr();
1959 ss.print_cr("Ideal optimization did not make progress but node hash changed.");
1960 ss.print_cr(" old_hash = %d, hash = %d", old_hash, n->hash());
1961 n->dump_bfs(1, nullptr, "", &ss);
1962 tty->print_cr("%s", ss.as_string());
1963 assert(false, "Unexpected hash change from applying Ideal optimization on %s", n->Name());
1964 }
1965
  // Some nodes try to push themselves back onto the worklist if can_reshape
  // is false.
1968 if (!can_reshape && _worklist.size() > 0 && _worklist.pop() != n) {
1969 stringStream ss;
1970 ss.cr();
1971 ss.print_cr("Previously optimized:");
1972 n->dump_bfs(1, nullptr, "", &ss);
1973 tty->print_cr("%s", ss.as_string());
1974 assert(false, "should only push itself on worklist");
1975 }
1976 verify_empty_worklist(n);
1977
1978 // Everything is good.
1979 hash_find_insert(n);
1980 return;
1981 }
1982
1983 // We just saw a new Idealization which was not done during IGVN.
1984 stringStream ss; // Print as a block without tty lock.
1985 ss.cr();
1986 ss.print_cr("Missed Ideal optimization (can_reshape=%s):", can_reshape ? "true": "false");
1987 if (i == n) {
1988 ss.print_cr("The node was reshaped by Ideal.");
1989 } else {
1990 ss.print_cr("The node was replaced by Ideal.");
1991 ss.print_cr("Old node:");
1992 n->dump_bfs(1, nullptr, "", &ss);
1993 }
1994 ss.print_cr("The result after Ideal:");
1995 i->dump_bfs(1, nullptr, "", &ss);
1996 tty->print_cr("%s", ss.as_string());
1997
1998 assert(false, "Missed Ideal optimization opportunity in PhaseIterGVN for %s", n->Name());
1999 }
2000
2001 // Check that all Identity optimizations that could be done were done.
2002 // Asserts if it found missed optimization opportunities, and
2003 // returns normally otherwise (no missed optimization, or skipped verification).
2004 void PhaseIterGVN::verify_Identity_for(Node* n) {
2005 // First, we check a list of exceptions, where we skip verification,
2006 // because there are known cases where Ideal can optimize after IGVN.
2007 // Some may be expected and cannot be fixed, and others should be fixed.
2008 switch (n->Opcode()) {
2009 // SafePointNode::Identity can remove SafePoints, but wants to wait until
2010 // after loopopts:
2011 // // Transforming long counted loops requires a safepoint node. Do not
2012 // // eliminate a safepoint until loop opts are over.
2013 // if (in(0)->is_Proj() && !phase->C->major_progress()) {
2014 //
2015 // I think the check for major_progress does delay it until after loopopts
2016 // but it does not ensure that the node is on the IGVN worklist after
2017 // loopopts. I think we should try to instead check for
2018 // phase->C->post_loop_opts_phase() and call record_for_post_loop_opts_igvn.
2019 //
2020 // Found with:
2021 // java -XX:VerifyIterativeGVN=1000 -Xcomp --version
2022 case Op_SafePoint:
2023 return;
2024
2025 // MergeMemNode::Identity replaces the MergeMem with its base_memory if it
2026 // does not record any other memory splits.
2027 //
2028 // I did not deeply investigate, but it looks like MergeMemNode::Identity
    // never got called during IGVN for this node; investigate why.
2030 //
2031 // Found with:
2032 // java -XX:VerifyIterativeGVN=1000 -Xcomp --version
2033 case Op_MergeMem:
2034 return;
2035
2036 // ConstraintCastNode::Identity finds casts that are the same, except that
2037 // the control is "higher up", i.e. dominates. The call goes via
2038 // ConstraintCastNode::dominating_cast to PhaseGVN::is_dominator_helper,
2039 // which traverses up to 100 idom steps. If anything gets optimized somewhere
2040 // away from the cast, but within 100 idom steps, the cast may not be
2041 // put on the IGVN worklist any more.
2042 //
2043 // Found with:
2044 // java -XX:VerifyIterativeGVN=1000 -Xcomp --version
2045 case Op_CastPP:
2046 case Op_CastII:
2047 case Op_CastLL:
2048 return;
2049
2050 // Same issue for CheckCastPP, uses ConstraintCastNode::Identity and
2051 // checks dominator, which may be changed, but too far up for notification
2052 // to work.
2053 //
2054 // Found with:
2055 // compiler/c2/irTests/TestSkeletonPredicates.java
2056 // -XX:VerifyIterativeGVN=1110
2057 case Op_CheckCastPP:
2058 return;
2059
2060 // In SubNode::Identity, we do:
2061 // Convert "(X+Y) - Y" into X and "(X+Y) - X" into Y
2062 // In the example, the AddI had an input replaced, the AddI is
2063 // added to the IGVN worklist, but the SubI is one link further
    //   down and is not added. I checked add_users_of_use_to_worklist,
    //   where I would expect the SubI to be added, and I cannot
2066 // find the pattern, only this one:
2067 // If changed AddI/SubI inputs, check CmpU for range check optimization.
2068 //
2069 // Fix this "notification" issue and check if there are any other
2070 // issues.
2071 //
2072 // Found with:
2073 // java -XX:VerifyIterativeGVN=1000 -Xcomp --version
2074 case Op_SubI:
2075 case Op_SubL:
2076 return;
2077
2078 // PhiNode::Identity checks for patterns like:
2079 // r = (x != con) ? x : con;
2080 // that can be constant folded to "x".
2081 //
2082 // Call goes through PhiNode::is_cmove_id and CMoveNode::is_cmove_id.
2083 // I suspect there was some earlier change to one of the inputs, but
2084 // not all relevant outputs were put on the IGVN worklist.
2085 //
2086 // Found with:
2087 // test/hotspot/jtreg/gc/stress/gcbasher/TestGCBasherWithG1.java
2088 // -XX:VerifyIterativeGVN=1110
2089 case Op_Phi:
2090 return;
2091
2092 // ConvI2LNode::Identity does
2093 // convert I2L(L2I(x)) => x
2094 //
2095 // Investigate why this did not already happen during IGVN.
2096 //
2097 // Found with:
2098 // compiler/loopopts/superword/TestDependencyOffsets.java#vanilla-A
2099 // -XX:VerifyIterativeGVN=1110
2100 case Op_ConvI2L:
2101 return;
2102
2103 // AbsINode::Identity
2104 // Not investigated yet.
2105 case Op_AbsI:
2106 return;
2107 }
2108
2109 if (n->is_Load()) {
2110 // LoadNode::Identity tries to look for an earlier store value via
2111 // can_see_stored_value. I found an example where this led to
2112 // an Allocation, where we could assume the value was still zero.
2113 // So the LoadN can be replaced with a zerocon.
2114 //
2115 // Investigate why this was not already done during IGVN.
2116 // A similar issue happens with Ideal.
2117 //
2118 // Found with:
2119 // java -XX:VerifyIterativeGVN=1000 -Xcomp --version
2120 return;
2121 }
2122
2123 if (n->is_Store()) {
2124 // StoreNode::Identity
2125 // Not investigated, but found missing optimization for StoreI.
2126 // Looks like a StoreI is replaced with an InitializeNode.
2127 //
2128 // Found with:
2129 // applications/ctw/modules/java_base_2.java
2130 // -ea -esa -XX:CompileThreshold=100 -XX:+UnlockExperimentalVMOptions -server -XX:-TieredCompilation -Djava.awt.headless=true -XX:+IgnoreUnrecognizedVMOptions -XX:VerifyIterativeGVN=1110
2131 return;
2132 }
2133
2134 if (n->is_Vector()) {
2135 // Found with tier1-3. Not investigated yet.
2136 // The observed issue was with AndVNode::Identity and
2137 // VectorStoreMaskNode::Identity (see JDK-8370863).
2138 //
2139 // Found with:
2140 // compiler/vectorapi/VectorStoreMaskIdentityTest.java
2141 // -XX:CompileThreshold=100 -XX:-TieredCompilation -XX:VerifyIterativeGVN=1110
2142 return;
2143 }
2144
2145 Node* i = n->Identity(this);
2146 // If we cannot find any other Identity, we are happy.
2147 if (i == n) {
2148 verify_empty_worklist(n);
2149 return;
2150 }
2151
2152 // The verification just found a new Identity that was not found during IGVN.
2153 stringStream ss; // Print as a block without tty lock.
2154 ss.cr();
2155 ss.print_cr("Missed Identity optimization:");
2156 ss.print_cr("Old node:");
2157 n->dump_bfs(1, nullptr, "", &ss);
2158 ss.print_cr("New node:");
2159 i->dump_bfs(1, nullptr, "", &ss);
2160 tty->print_cr("%s", ss.as_string());
2161
2162 assert(false, "Missed Identity optimization opportunity in PhaseIterGVN for %s", n->Name());
2163 }
2164
2165 // Some other verifications that are not specific to a particular transformation.
2166 void PhaseIterGVN::verify_node_invariants_for(const Node* n) {
2167 if (n->is_AddP()) {
2168 if (!n->as_AddP()->address_input_has_same_base()) {
2169 stringStream ss; // Print as a block without tty lock.
2170 ss.cr();
2171 ss.print_cr("Base pointers must match for AddP chain:");
2172 n->dump_bfs(2, nullptr, "", &ss);
2173 tty->print_cr("%s", ss.as_string());
2174
2175 assert(false, "Broken node invariant for %s", n->Name());
2176 }
2177 }
2178 }
2179 #endif
2180
2181 /**
2182 * Register a new node with the optimizer. Update the types array, the def-use
2183 * info. Put on worklist.
2184 */
2185 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
2186 set_type_bottom(n);
2187 _worklist.push(n);
2188 if (orig != nullptr) C->copy_node_notes_to(n, orig);
2189 return n;
2190 }
2191
2192 //------------------------------transform--------------------------------------
2193 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
2194 Node *PhaseIterGVN::transform( Node *n ) {
2195 // If brand new node, make space in type array, and give it a type.
2196 ensure_type_or_null(n);
2197 if (type_or_null(n) == nullptr) {
2198 set_type_bottom(n);
2199 }
2200
2201 if (_delay_transform) {
2202 // Add the node to the worklist but don't optimize for now
2203 _worklist.push(n);
2204 return n;
2205 }
2206
2207 return transform_old(n);
2208 }
2209
2210 Node *PhaseIterGVN::transform_old(Node* n) {
2211 NOT_PRODUCT(set_transforms());
2212 // Remove 'n' from hash table in case it gets modified
2213 _table.hash_delete(n);
2214 #ifdef ASSERT
2215 if (is_verify_def_use()) {
2216 assert(!_table.find_index(n->_idx), "found duplicate entry in table");
2217 }
2218 #endif
2219
2220 // Allow Bool -> Cmp idealisation in late inlining intrinsics that return a bool
2221 if (n->is_Cmp()) {
2222 add_users_to_worklist(n);
2223 }
2224
2225 // Apply the Ideal call in a loop until it no longer applies
2226 Node* k = n;
2227 DEBUG_ONLY(dead_loop_check(k);)
2228 DEBUG_ONLY(bool is_new = (k->outcnt() == 0);)
2229 C->remove_modified_node(k);
2230 #ifndef PRODUCT
2231 uint hash_before = is_verify_Ideal_return() ? k->hash() : 0;
2232 #endif
2233 Node* i = apply_ideal(k, /*can_reshape=*/true);
2234 assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
2235 #ifndef PRODUCT
2236 if (is_verify_Ideal_return()) {
2237 assert(k->outcnt() == 0 || i != nullptr || hash_before == k->hash(), "hash changed after Ideal returned nullptr for %s", k->Name());
2238 }
2239 verify_step(k);
2240 #endif
2241
2242 DEBUG_ONLY(uint loop_count = 1;)
2243 if (i != nullptr) {
2244 set_progress();
2245 }
2246 while (i != nullptr) {
2247 #ifdef ASSERT
2248 if (loop_count >= K + C->live_nodes()) {
2249 dump_infinite_loop_info(i, "PhaseIterGVN::transform_old");
2250 }
2251 #endif
2252 assert((i->_idx >= k->_idx) || i->is_top(), "Idealize should return new nodes, use Identity to return old nodes");
2253 // Made a change; put users of original Node on worklist
2254 add_users_to_worklist(k);
2255 // Replacing root of transform tree?
2256 if (k != i) {
2257 // Make users of old Node now use new.
2258 subsume_node(k, i);
2259 k = i;
2260 }
2261 DEBUG_ONLY(dead_loop_check(k);)
2262 // Try idealizing again
2263 DEBUG_ONLY(is_new = (k->outcnt() == 0);)
2264 C->remove_modified_node(k);
2265 #ifndef PRODUCT
2266 uint hash_before = is_verify_Ideal_return() ? k->hash() : 0;
2267 #endif
2268 i = apply_ideal(k, /*can_reshape=*/true);
2269 assert(i != k || is_new || (i->outcnt() > 0), "don't return dead nodes");
2270 #ifndef PRODUCT
2271 if (is_verify_Ideal_return()) {
2272 assert(k->outcnt() == 0 || i != nullptr || hash_before == k->hash(), "hash changed after Ideal returned nullptr for %s", k->Name());
2273 }
2274 verify_step(k);
2275 #endif
2276 DEBUG_ONLY(loop_count++;)
2277 }
2278
2279 // If brand new node, make space in type array.
2280 ensure_type_or_null(k);
2281
2282 // See what kind of values 'k' takes on at runtime
2283 const Type* t = k->Value(this);
2284 assert(t != nullptr, "value sanity");
2285
2286 // Since I just called 'Value' to compute the set of run-time values
2287 // for this Node, and 'Value' is non-local (and therefore expensive) I'll
2288 // cache Value. Later requests for the local phase->type of this Node can
2289 // use the cached Value instead of suffering with 'bottom_type'.
2290 if (type_or_null(k) != t) {
2291 NOT_PRODUCT(inc_new_values();)
2292 set_progress();
2293 set_type(k, t);
2294 // If k is a TypeNode, capture any more-precise type permanently into Node
2295 k->raise_bottom_type(t);
2296 // Move users of node to worklist
2297 add_users_to_worklist(k);
2298 }
2299 // If 'k' computes a constant, replace it with a constant
2300 if (t->singleton() && !k->is_Con()) {
2301 set_progress();
2302 Node* con = makecon(t); // Make a constant
2303 add_users_to_worklist(k);
2304 subsume_node(k, con); // Everybody using k now uses con
2305 return con;
2306 }
2307
2308 // Now check for Identities
2309 i = k->Identity(this); // Look for a nearby replacement
2310 if (i != k) { // Found? Return replacement!
2311 set_progress();
2312 add_users_to_worklist(k);
2313 subsume_node(k, i); // Everybody using k now uses i
2314 return i;
2315 }
2316
2317 // Global Value Numbering
2318 i = hash_find_insert(k); // Check for pre-existing node
2319 if (i && (i != k)) {
2320 // Return the pre-existing node if it isn't dead
2321 set_progress();
2322 add_users_to_worklist(k);
2323 subsume_node(k, i); // Everybody using k now uses i
2324 return i;
2325 }
2326
2327 // Return Idealized original
2328 return k;
2329 }
2330
2331 //---------------------------------saturate------------------------------------
2332 const Type* PhaseIterGVN::saturate(const Type* new_type, const Type* old_type,
2333 const Type* limit_type) const {
2334 return new_type->narrow(old_type);
2335 }
2336
2337 //------------------------------remove_globally_dead_node----------------------
2338 // Kill a globally dead Node. All uses are also globally dead and are
2339 // aggressively trimmed.
2340 void PhaseIterGVN::remove_globally_dead_node(Node* dead, NodeOrigin origin) {
2341 enum DeleteProgress {
2342 PROCESS_INPUTS,
2343 PROCESS_OUTPUTS
2344 };
2345 ResourceMark rm;
2346 Node_Stack stack(32);
2347 stack.push(dead, PROCESS_INPUTS);
2348
2349 while (stack.is_nonempty()) {
2350 dead = stack.node();
2351 if (dead->Opcode() == Op_SafePoint) {
2352 dead->as_SafePoint()->disconnect_from_root(this);
2353 }
2354 uint progress_state = stack.index();
2355 assert(dead != C->root(), "killing root, eh?");
2356 assert(!dead->is_top(), "add check for top when pushing");
2357 if (progress_state == PROCESS_INPUTS) {
2358 // After following inputs, continue to outputs
2359 stack.set_index(PROCESS_OUTPUTS);
      if (!dead->is_Con()) { // Don't kill constants, only their uses
2361 if (origin != NodeOrigin::Speculative) {
2362 set_progress();
2363 }
2364 bool recurse = false;
2365 // Remove from hash table
2366 _table.hash_delete( dead );
2367 // Smash all inputs to 'dead', isolating him completely
2368 for (uint i = 0; i < dead->req(); i++) {
2369 Node *in = dead->in(i);
2370 if (in != nullptr && in != C->top()) { // Points to something?
2371 int nrep = dead->replace_edge(in, nullptr, this); // Kill edges
2372 assert((nrep > 0), "sanity");
2373 if (in->outcnt() == 0) { // Made input go dead?
2374 stack.push(in, PROCESS_INPUTS); // Recursively remove
2375 recurse = true;
2376 } else if (in->outcnt() == 1 &&
2377 in->has_special_unique_user()) {
2378 _worklist.push(in->unique_out());
2379 } else if (in->outcnt() <= 2 && dead->is_Phi()) {
2380 if (in->Opcode() == Op_Region) {
2381 _worklist.push(in);
2382 } else if (in->is_Store()) {
2383 DUIterator_Fast imax, i = in->fast_outs(imax);
2384 _worklist.push(in->fast_out(i));
2385 i++;
2386 if (in->outcnt() == 2) {
2387 _worklist.push(in->fast_out(i));
2388 i++;
2389 }
2390 assert(!(i < imax), "sanity");
2391 }
2392 } else if (dead->is_data_proj_of_pure_function(in)) {
2393 _worklist.push(in);
2394 } else {
2395 BarrierSet::barrier_set()->barrier_set_c2()->enqueue_useful_gc_barrier(this, in);
2396 }
2397 if (ReduceFieldZeroing && dead->is_Load() && i == MemNode::Memory &&
2398 in->is_Proj() && in->in(0) != nullptr && in->in(0)->is_Initialize()) {
2399 // A Load that directly follows an InitializeNode is
2400 // going away. The Stores that follow are candidates
2401 // again to be captured by the InitializeNode.
2402 add_users_to_worklist_if(_worklist, in, [](Node* n) { return n->is_Store(); });
2403 }
2404 } // if (in != nullptr && in != C->top())
2405 } // for (uint i = 0; i < dead->req(); i++)
2406 if (recurse) {
2407 continue;
2408 }
2409 } // if (!dead->is_Con())
2410 } // if (progress_state == PROCESS_INPUTS)
2411
2412 // Aggressively kill globally dead uses
2413 // (Rather than pushing all the outs at once, we push one at a time,
2414 // plus the parent to resume later, because of the indefinite number
2415 // of edge deletions per loop trip.)
2416 if (dead->outcnt() > 0) {
2417 // Recursively remove output edges
2418 stack.push(dead->raw_out(0), PROCESS_INPUTS);
2419 } else {
2420 // Finished disconnecting all input and output edges.
2421 stack.pop();
2422 // Remove dead node from iterative worklist
2423 _worklist.remove(dead);
2424 C->remove_useless_node(dead);
2425 }
2426 } // while (stack.is_nonempty())
2427 }
2428
2429 //------------------------------subsume_node-----------------------------------
2430 // Remove users from node 'old' and add them to node 'nn'.
2431 void PhaseIterGVN::subsume_node( Node *old, Node *nn ) {
2432 if (old->Opcode() == Op_SafePoint) {
2433 old->as_SafePoint()->disconnect_from_root(this);
2434 }
2435 assert( old != hash_find(old), "should already been removed" );
2436 assert( old != C->top(), "cannot subsume top node");
2437 // Copy debug or profile information to the new version:
2438 C->copy_node_notes_to(nn, old);
2439 // Move users of node 'old' to node 'nn'
2440 for (DUIterator_Last imin, i = old->last_outs(imin); i >= imin; ) {
2441 Node* use = old->last_out(i); // for each use...
2442 // use might need re-hashing (but it won't if it's a new node)
2443 rehash_node_delayed(use);
2444 // Update use-def info as well
2445 // We remove all occurrences of old within use->in,
2446 // so as to avoid rehashing any node more than once.
2447 // The hash table probe swamps any outer loop overhead.
2448 uint num_edges = 0;
2449 for (uint jmax = use->len(), j = 0; j < jmax; j++) {
2450 if (use->in(j) == old) {
2451 use->set_req(j, nn);
2452 ++num_edges;
2453 }
2454 }
2455 i -= num_edges; // we deleted 1 or more copies of this edge
2456 }
2457
2458 // Search for instance field data PhiNodes in the same region pointing to the old
2459 // memory PhiNode and update their instance memory ids to point to the new node.
2460 if (old->is_Phi() && old->as_Phi()->type()->has_memory() && old->in(0) != nullptr) {
2461 Node* region = old->in(0);
2462 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2463 PhiNode* phi = region->fast_out(i)->isa_Phi();
2464 if (phi != nullptr && phi->inst_mem_id() == (int)old->_idx) {
2465 phi->set_inst_mem_id((int)nn->_idx);
2466 }
2467 }
2468 }
2469
2470 // Smash all inputs to 'old', isolating him completely
2471 Node *temp = new Node(1);
2472 temp->init_req(0,nn); // Add a use to nn to prevent him from dying
2473 remove_dead_node(old, NodeOrigin::Graph);
2474 temp->del_req(0); // Yank bogus edge
2475 if (nn != nullptr && nn->outcnt() == 0) {
2476 _worklist.push(nn);
2477 }
2478 #ifndef PRODUCT
2479 if (is_verify_def_use()) {
2480 for ( int i = 0; i < _verify_window_size; i++ ) {
2481 if ( _verify_window[i] == old )
2482 _verify_window[i] = nn;
2483 }
2484 }
2485 #endif
2486 temp->destruct(this); // reuse the _idx of this little guy
2487 }
2488
2489 void PhaseIterGVN::replace_in_uses(Node* n, Node* m) {
2490 assert(n != nullptr, "sanity");
2491 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2492 Node* u = n->fast_out(i);
2493 if (u != n) {
2494 rehash_node_delayed(u);
2495 int nb = u->replace_edge(n, m);
2496 --i, imax -= nb;
2497 }
2498 }
2499 assert(n->outcnt() == 0, "all uses must be deleted");
2500 }
2501
2502 //------------------------------add_users_to_worklist--------------------------
2503 void PhaseIterGVN::add_users_to_worklist0(Node* n, Unique_Node_List& worklist) {
2504 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2505 worklist.push(n->fast_out(i)); // Push on worklist
2506 }
2507 }
2508
// Return the counted loop Phi if, as part of a counted loop exit condition, cmp
// compares the induction variable with n.
2511 static PhiNode* countedloop_phi_from_cmp(CmpNode* cmp, Node* n) {
2512 for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
2513 Node* bol = cmp->fast_out(i);
2514 for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
2515 Node* iff = bol->fast_out(i2);
2516 if (iff->is_BaseCountedLoopEnd()) {
2517 BaseCountedLoopEndNode* cle = iff->as_BaseCountedLoopEnd();
2518 if (cle->limit() == n) {
2519 PhiNode* phi = cle->phi();
2520 if (phi != nullptr) {
2521 return phi;
2522 }
2523 }
2524 }
2525 }
2526 }
2527 return nullptr;
2528 }
2529
2530 void PhaseIterGVN::add_users_to_worklist(Node *n) {
2531 add_users_to_worklist0(n, _worklist);
2532
2533 Unique_Node_List& worklist = _worklist;
2534 // Move users of node to worklist
2535 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2536 Node* use = n->fast_out(i); // Get use
2537 add_users_of_use_to_worklist(n, use, worklist);
2538 }
2539 }
2540
2541 void PhaseIterGVN::add_users_of_use_to_worklist(Node* n, Node* use, Unique_Node_List& worklist) {
2542 if(use->is_Multi() || // Multi-definer? Push projs on worklist
2543 use->is_Store() ) // Enable store/load same address
2544 add_users_to_worklist0(use, worklist);
2545
2546 // If we changed the receiver type to a call, we need to revisit
2547 // the Catch following the call. It's looking for a non-null
2548 // receiver to know when to enable the regular fall-through path
2549 // in addition to the NullPtrException path.
2550 if (use->is_CallDynamicJava() && n == use->in(TypeFunc::Parms)) {
2551 Node* p = use->as_CallDynamicJava()->proj_out_or_null(TypeFunc::Control);
2552 if (p != nullptr) {
2553 add_users_to_worklist0(p, worklist);
2554 }
2555 }
2556
2557 // AndLNode::Ideal folds GraphKit::mark_word_test patterns. Give it a chance to run.
2558 if (n->is_Load() && use->is_Phi()) {
2559 for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
2560 Node* u = use->fast_out(i);
2561 if (u->Opcode() == Op_AndL) {
2562 worklist.push(u);
2563 }
2564 }
2565 }
2566
2567 uint use_op = use->Opcode();
2568 if(use->is_Cmp()) { // Enable CMP/BOOL optimization
2569 add_users_to_worklist0(use, worklist); // Put Bool on worklist
2570 if (use->outcnt() > 0) {
2571 Node* bol = use->raw_out(0);
2572 if (bol->outcnt() > 0) {
2573 Node* iff = bol->raw_out(0);
2574 if (iff->outcnt() == 2) {
          // Look for the 'is_x2logic' pattern: "x ? 0 : 1" and put the
2576 // phi merging either 0 or 1 onto the worklist
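          // A source shape that produces such a Phi (a sketch):
          //   int r = (x == 0) ? 1 : 0;
          // i.e. a Phi merging the constants 0 and 1 over the two If projections.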
2577 Node* ifproj0 = iff->raw_out(0);
2578 Node* ifproj1 = iff->raw_out(1);
2579 if (ifproj0->outcnt() > 0 && ifproj1->outcnt() > 0) {
2580 Node* region0 = ifproj0->raw_out(0);
2581 Node* region1 = ifproj1->raw_out(0);
2582 if( region0 == region1 )
2583 add_users_to_worklist0(region0, worklist);
2584 }
2585 }
2586 }
2587 }
2588 if (use_op == Op_CmpI || use_op == Op_CmpL) {
2589 Node* phi = countedloop_phi_from_cmp(use->as_Cmp(), n);
2590 if (phi != nullptr) {
2591 // Input to the cmp of a loop exit check has changed, thus
2592 // the loop limit may have changed, which can then change the
2593 // range values of the trip-count Phi.
2594 worklist.push(phi);
2595 }
2596 }
2597 if (use_op == Op_CmpI) {
2598 Node* cmp = use;
2599 Node* in1 = cmp->in(1);
2600 Node* in2 = cmp->in(2);
2601 // Notify CmpI / If pattern from CastIINode::Value (left pattern).
2602 // Must also notify if in1 is modified and possibly turns into X (right pattern).
2603 //
2604 // in1 in2 in1 in2
2605 // | | | |
2606 // +--- | --+ | |
2607 // | | | | |
2608 // CmpINode | CmpINode
2609 // | | |
2610 // BoolNode | BoolNode
2611 // | | OR |
2612 // IfNode | IfNode
2613 // | | |
2614 // IfProj | IfProj X
2615 // | | | |
2616 // CastIINode CastIINode
2617 //
2618 if (in1 != in2) { // if they are equal, the CmpI can fold them away
2619 if (in1 == n) {
2620 // in1 modified -> could turn into X -> do traversal based on right pattern.
2621 for (DUIterator_Fast i2max, i2 = cmp->fast_outs(i2max); i2 < i2max; i2++) {
2622 Node* bol = cmp->fast_out(i2); // For each Bool
2623 if (bol->is_Bool()) {
2624 for (DUIterator_Fast i3max, i3 = bol->fast_outs(i3max); i3 < i3max; i3++) {
2625 Node* iff = bol->fast_out(i3); // For each If
2626 if (iff->is_If()) {
2627 for (DUIterator_Fast i4max, i4 = iff->fast_outs(i4max); i4 < i4max; i4++) {
2628 Node* if_proj = iff->fast_out(i4); // For each IfProj
2629 assert(if_proj->is_IfProj(), "If only has IfTrue and IfFalse as outputs");
2630 for (DUIterator_Fast i5max, i5 = if_proj->fast_outs(i5max); i5 < i5max; i5++) {
2631 Node* castii = if_proj->fast_out(i5); // For each CastII
2632 if (castii->is_CastII() &&
2633 castii->as_CastII()->carry_dependency()) {
2634 worklist.push(castii);
2635 }
2636 }
2637 }
2638 }
2639 }
2640 }
2641 }
2642 } else {
2643 // Only in2 modified -> can assume X == in2 (left pattern).
2644 assert(n == in2, "only in2 modified");
2645 // Find all CastII with input in1.
2646 for (DUIterator_Fast jmax, j = in1->fast_outs(jmax); j < jmax; j++) {
2647 Node* castii = in1->fast_out(j);
2648 if (castii->is_CastII() && castii->as_CastII()->carry_dependency()) {
2649 // Find If.
2650 if (castii->in(0) != nullptr && castii->in(0)->in(0) != nullptr && castii->in(0)->in(0)->is_If()) {
2651 Node* ifnode = castii->in(0)->in(0);
2652 // Check that if connects to the cmp
2653 if (ifnode->in(1) != nullptr && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == cmp) {
2654 worklist.push(castii);
2655 }
2656 }
2657 }
2658 }
2659 }
2660 }
2661 }
2662 }
2663
2664 // Inline type nodes can have other inline types as users. If an input gets
2665 // updated, make sure that inline type users get a chance for optimization.
2666 if (use->is_InlineType() || use->is_DecodeN()) {
2667 auto push_the_uses_to_worklist = [&](Node* n){
2668 if (n->is_InlineType()) {
2669 worklist.push(n);
2670 }
2671 };
2672 auto is_boundary = [](Node* n){ return !n->is_InlineType(); };
2673 use->visit_uses(push_the_uses_to_worklist, is_boundary, true);
2674 }
2675 // If changed Cast input, notify down for Phi, Sub, and Xor - all do "uncast"
2676 // Patterns:
2677 // ConstraintCast+ -> Sub
2678 // ConstraintCast+ -> Phi
2679 // ConstraintCast+ -> Xor
2680 if (use->is_ConstraintCast()) {
2681 auto push_the_uses_to_worklist = [&](Node* n){
2682 if (n->is_Phi() || n->is_Sub() || n->Opcode() == Op_XorI || n->Opcode() == Op_XorL) {
2683 worklist.push(n);
2684 }
2685 };
2686 auto is_boundary = [](Node* n){ return !n->is_ConstraintCast(); };
2687 use->visit_uses(push_the_uses_to_worklist, is_boundary);
2688 }
2689 // If changed LShift inputs, check RShift/URShift users for
2690 // "(X << C) >> C" sign-ext and "(X << C) >>> C" zero-ext optimizations.
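  // For instance (a sketch), with C == 16 on int values:
  //   (x << 16) >> 16   behaves like a cast to short (sign extension), and
  //   (x << 16) >>> 16  behaves like a cast to char (zero extension).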
2691 if (use_op == Op_LShiftI || use_op == Op_LShiftL) {
2692 add_users_to_worklist_if(worklist, use, [](Node* u) {
2693 return u->Opcode() == Op_RShiftI || u->Opcode() == Op_RShiftL ||
2694 u->Opcode() == Op_URShiftI || u->Opcode() == Op_URShiftL;
2695 });
2696 }
2697 // If changed LShift inputs, check And users for shift and mask (And) operation
2698 if (use_op == Op_LShiftI || use_op == Op_LShiftL) {
2699 add_users_to_worklist_if(worklist, use, [](Node* u) {
2700 return u->Opcode() == Op_AndI || u->Opcode() == Op_AndL;
2701 });
2702 }
2703 // If changed AddI/SubI inputs, check CmpU for range check optimization.
2704 if (use_op == Op_AddI || use_op == Op_SubI) {
2705 add_users_to_worklist_if(worklist, use, [](Node* u) {
2706 return u->Opcode() == Op_CmpU;
2707 });
2708 }
2709 // If changed AddI/AddL inputs, check URShift users for
2710 // "((X << z) + Y) >>> z" optimization in URShift{I,L}Node::Ideal.
2711 if (use_op == Op_AddI || use_op == Op_AddL) {
2712 add_users_to_worklist_if(worklist, use, [](Node* u) {
2713 return u->Opcode() == Op_URShiftI || u->Opcode() == Op_URShiftL;
2714 });
2715 }
2716 // If changed LShiftI/LShiftL inputs, check AddI/AddL users for their
2717 // URShiftI/URShiftL users for "((x << z) + y) >>> z" optimization opportunity
2718 // (see URShiftINode::Ideal). Handles the case where the LShift input changes.
2719 if (use_op == Op_LShiftI || use_op == Op_LShiftL) {
2720 for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
2721 Node* add = use->fast_out(i2);
2722 if (add->Opcode() == Op_AddI || add->Opcode() == Op_AddL) {
2723 add_users_to_worklist_if(worklist, add, [](Node* u) {
2724 return u->Opcode() == Op_URShiftI || u->Opcode() == Op_URShiftL;
2725 });
2726 }
2727 }
2728 }
2729 // If changed AndI/AndL inputs, check RShift/URShift users for "(x & mask) >> shift" optimization opportunity
2730 if (use_op == Op_AndI || use_op == Op_AndL) {
2731 add_users_to_worklist_if(worklist, use, [](Node* u) {
2732 return u->Opcode() == Op_RShiftI || u->Opcode() == Op_RShiftL ||
2733 u->Opcode() == Op_URShiftI || u->Opcode() == Op_URShiftL;
2734 });
2735 }
2736 // Check for redundant conversion patterns:
2737 // ConvD2L->ConvL2D->ConvD2L
2738 // ConvF2I->ConvI2F->ConvF2I
2739 // ConvF2L->ConvL2F->ConvF2L
2740 // ConvI2F->ConvF2I->ConvI2F
  // Note: there may be other 3-node conversion chains that would need to be added here,
  // but these are the only ones that are known to trigger missed optimizations otherwise.
2743 if (use_op == Op_ConvL2D ||
2744 use_op == Op_ConvI2F ||
2745 use_op == Op_ConvL2F ||
2746 use_op == Op_ConvF2I) {
2747 add_users_to_worklist_if(worklist, use, [=](Node* u) {
2748 return (use_op == Op_ConvL2D && u->Opcode() == Op_ConvD2L) ||
2749 (use_op == Op_ConvI2F && u->Opcode() == Op_ConvF2I) ||
2750 (use_op == Op_ConvL2F && u->Opcode() == Op_ConvF2L) ||
2751 (use_op == Op_ConvF2I && u->Opcode() == Op_ConvI2F);
2752 });
2753 }
2754 // ConvD2F::Ideal matches ConvD2F(SqrtD(ConvF2D(x))) => SqrtF(x).
2755 // Notify ConvD2F users of SqrtD when any input of the SqrtD changes.
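  // This shape typically comes from Java source like (a sketch):
  //   float r = (float) Math.sqrt((double) f);   // for a float f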
2756 if (use_op == Op_SqrtD) {
2757 add_users_to_worklist_if(worklist, use, [](Node* u) { return u->Opcode() == Op_ConvD2F; });
2758 }
2759 // ConvF2HF::Ideal matches ConvF2HF(binopF(ConvHF2F(...))) => FP16BinOp(...).
2760 // Notify ConvF2HF users of float binary ops when any input changes.
2761 if (Float16NodeFactory::is_float32_binary_oper(use_op)) {
2762 add_users_to_worklist_if(worklist, use, [](Node* u) { return u->Opcode() == Op_ConvF2HF; });
2763 }
2764 // If changed AddP inputs:
2765 // - check Stores for loop invariant, and
2766 // - if the changed input is the offset, check constant-offset AddP users for
2767 // address expression flattening.
2768 if (use_op == Op_AddP) {
2769 bool offset_changed = n == use->in(AddPNode::Offset);
2770 add_users_to_worklist_if(worklist, use, [=](Node* u) {
2771 return u->is_Mem() ||
2772 (offset_changed && u->is_AddP() && u->in(AddPNode::Offset)->is_Con());
2773 });
2774 }
2775 // Check for "abs(0-x)" into "abs(x)" conversion
2776 if (use->is_Sub()) {
2777 add_users_to_worklist_if(worklist, use, [](Node* u) {
2778 return u->Opcode() == Op_AbsD || u->Opcode() == Op_AbsF ||
2779 u->Opcode() == Op_AbsL || u->Opcode() == Op_AbsI;
2780 });
2781 }
2782 // Check for Max/Min(A, Max/Min(B, C)) where A == B or A == C
2783 if (use->is_MinMax()) {
2784 add_users_to_worklist_if(worklist, use, [](Node* u) { return u->is_MinMax(); });
2785 }
2786 auto enqueue_init_mem_projs = [&](ProjNode* proj) {
2787 add_users_to_worklist0(proj, worklist);
2788 };
2789 // If changed initialization activity, check dependent Stores
2790 if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
2791 InitializeNode* init = use->as_Allocate()->initialization();
2792 if (init != nullptr) {
2793 init->for_each_proj(enqueue_init_mem_projs, TypeFunc::Memory);
2794 }
2795 }
2796 // If the ValidLengthTest input changes then the fallthrough path out of the AllocateArray may have become dead.
2797 // CatchNode::Value() is responsible for killing that path. The CatchNode has to be explicitly enqueued for igvn
2798 // to guarantee the change is not missed.
2799 if (use_op == Op_AllocateArray && n == use->in(AllocateNode::ValidLengthTest)) {
2800 Node* p = use->as_AllocateArray()->proj_out_or_null(TypeFunc::Control);
2801 if (p != nullptr) {
2802 add_users_to_worklist0(p, worklist);
2803 }
2804 }
2805
2806 if (use_op == Op_Initialize) {
2807 InitializeNode* init = use->as_Initialize();
2808 init->for_each_proj(enqueue_init_mem_projs, TypeFunc::Memory);
2809 }
2810 // Loading the java mirror from a Klass requires two loads and the type
2811 // of the mirror load depends on the type of 'n'. See LoadNode::Value().
2812 // LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
2813 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2814 bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
2815
2816 if (use_op == Op_CastP2X) {
2817 for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
2818 Node* u = use->fast_out(i2);
2819 // TODO 8350865 Still needed? Yes, I think this is from PhaseMacroExpand::expand_mh_intrinsic_return
2820 if (u->Opcode() == Op_AndX) {
2821 worklist.push(u);
2822 }
2823 // Search for CmpL(OrL(CastP2X(..), CastP2X(..)), 0L)
2824 if (u->Opcode() == Op_OrL) {
2825 for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
2826 Node* cmp = u->fast_out(i3);
2827 if (cmp->Opcode() == Op_CmpL) {
2828 worklist.push(cmp);
2829 }
2830 }
2831 }
2832 }
2833 }
2834 if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
2835 for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
2836 Node* u = use->fast_out(i2);
2837 const Type* ut = u->bottom_type();
2838 if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
2839 if (has_load_barrier_nodes) {
2840 // Search for load barriers behind the load
2841 add_users_to_worklist_if(worklist, u, [&](Node* b) {
2842 return bs->is_gc_barrier_node(b);
2843 });
2844 }
2845 worklist.push(u);
2846 }
2847 }
2848 }
2849 // Give CallStaticJavaNode::remove_useless_allocation a chance to run
2850 if (use->is_Region()) {
2851 Node* c = use;
2852 do {
2853 c = c->unique_ctrl_out_or_null();
2854 } while (c != nullptr && c->is_Region());
2855 if (c != nullptr && c->is_CallStaticJava() && c->as_CallStaticJava()->uncommon_trap_request() != 0) {
2856 worklist.push(c);
2857 }
2858 }
2859 if (use->Opcode() == Op_OpaqueZeroTripGuard) {
2860 assert(use->outcnt() <= 1, "OpaqueZeroTripGuard can't be shared");
2861 if (use->outcnt() == 1) {
2862 Node* cmp = use->unique_out();
2863 worklist.push(cmp);
2864 }
2865 }
2866 // VectorMaskToLongNode::Ideal_MaskAll looks through VectorStoreMask
2867 // to fold constant masks.
2868 if (use_op == Op_VectorStoreMask) {
2869 add_users_to_worklist_if(worklist, use, [](Node* u) { return u->Opcode() == Op_VectorMaskToLong; });
2870 }
2871
2872 // From CastX2PNode::Ideal
2873 // CastX2P(AddX(x, y))
2874 // CastX2P(SubX(x, y))
2875 if (use->Opcode() == Op_AddX || use->Opcode() == Op_SubX) {
2876 add_users_to_worklist_if(worklist, use, [](Node* u) { return u->Opcode() == Op_CastX2P; });
2877 }
2878
  /* AndNode has special handling when one of its operands is a LShiftNode:
2880 * (LHS << s) & RHS
2881 * if RHS fits in less than s bits, the value of this expression is 0.
2882 * The difficulty is that there might be a conversion node (ConvI2L) between
2883 * the LShiftINode and the AndLNode, like so:
2884 * AndLNode(ConvI2L(LShiftI(LHS, s)), RHS)
2885 * This case is handled by And[IL]Node::Value(PhaseGVN*)
2886 * (see `AndIL_min_trailing_zeros`).
2887 *
2888 * But, when the shift is updated during IGVN, pushing the user (ConvI2L)
2889 * is not enough: there might be no update happening there. We need to
2890 * directly push the And[IL]Node on the worklist, jumping over ConvI2L.
2891 *
2892 * Moreover we can have ConstraintCasts in between. It may look like
2893 * ConstraintCast+ -> ConvI2L -> ConstraintCast+ -> And
2894 * and And[IL]Node::Value(PhaseGVN*) still handles that by looking through casts.
2895 * So we must deal with that as well.
2896 */
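  // A concrete instance of the pattern above (a sketch): with s = 8,
  //   AndL(ConvI2L(LShiftI(x, 8)), 0x7F)
  // is always 0, because the shifted value has its low 8 bits cleared and
  // 0x7F only occupies the low 7 bits.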
2897 if (use->is_ConstraintCast() || use_op == Op_ConvI2L) {
2898 auto is_boundary = [](Node* n){ return !n->is_ConstraintCast() && n->Opcode() != Op_ConvI2L; };
2899 auto push_and_to_worklist = [&worklist](Node* n){
2900 if (n->Opcode() == Op_AndL || n->Opcode() == Op_AndI) {
2901 worklist.push(n);
2902 }
2903 };
2904 use->visit_uses(push_and_to_worklist, is_boundary);
2905 }
2906
2907 // If changed Sub inputs, check Add for identity.
2908 // e.g., (x - y) + y -> x; x + (y - x) -> y.
2909 if (use_op == Op_SubI || use_op == Op_SubL) {
2910 const int add_op = (use_op == Op_SubI) ? Op_AddI : Op_AddL;
2911 add_users_to_worklist_if(worklist, use, [=](Node* u) { return u->Opcode() == add_op; });
2912 }
2913 }
2914
2915 /**
2916 * Remove the speculative part of all types that we know of
2917 */
2918 void PhaseIterGVN::remove_speculative_types() {
2919 assert(UseTypeSpeculation, "speculation is off");
2920 for (uint i = 0; i < _types.Size(); i++) {
2921 const Type* t = _types.fast_lookup(i);
2922 if (t != nullptr) {
2923 _types.map(i, t->remove_speculative());
2924 }
2925 }
2926 _table.check_no_speculative_types();
2927 }
2928
2929 //=============================================================================
2930 #ifndef PRODUCT
2931 uint PhaseCCP::_total_invokes = 0;
2932 uint PhaseCCP::_total_constants = 0;
2933 #endif
2934 //------------------------------PhaseCCP---------------------------------------
2935 // Conditional Constant Propagation, ala Wegman & Zadeck
2936 PhaseCCP::PhaseCCP( PhaseIterGVN *igvn ) : PhaseIterGVN(igvn) {
2937 NOT_PRODUCT( clear_constants(); )
2938 assert( _worklist.size() == 0, "" );
2939 _phase = PhaseValuesType::ccp;
2940 analyze();
2941 }
2942
2943 #ifndef PRODUCT
2944 //------------------------------~PhaseCCP--------------------------------------
2945 PhaseCCP::~PhaseCCP() {
2946 inc_invokes();
2947 _total_constants += count_constants();
2948 }
2949 #endif
2950
2951
2952 #ifdef ASSERT
2953 void PhaseCCP::verify_type(Node* n, const Type* tnew, const Type* told) {
2954 if (tnew->meet(told) != tnew->remove_speculative()) {
2955 n->dump(3);
2956 tty->print("told = "); told->dump(); tty->cr();
2957 tty->print("tnew = "); tnew->dump(); tty->cr();
2958 fatal("Not monotonic");
2959 }
2960 assert(!told->isa_int() || !tnew->isa_int() || told->is_int()->_widen <= tnew->is_int()->_widen, "widen increases");
2961 assert(!told->isa_long() || !tnew->isa_long() || told->is_long()->_widen <= tnew->is_long()->_widen, "widen increases");
2962 }
2963 #endif //ASSERT
2964
2965 // In this analysis, all types are initially set to TOP. We iteratively call Value() on all nodes of the graph until
2966 // we reach a fixed-point (i.e. no types change anymore). We start with a list that only contains the root node. Each time
2967 // a new type is set, we push all uses of that node back to the worklist (in some cases, we also push grandchildren
2968 // or nodes even further down back to the worklist because their type could change as a result of the current type
2969 // change).
2970 void PhaseCCP::analyze() {
2971 // Initialize all types to TOP, optimistic analysis
2972 for (uint i = 0; i < C->unique(); i++) {
2973 _types.map(i, Type::TOP);
2974 }
2975
2976 // CCP worklist is placed on a local arena, so that we can allow ResourceMarks on "Compile::current()->resource_arena()".
2977 // We also do not want to put the worklist on "Compile::current()->comp_arena()", as that one only gets de-allocated after
2978 // Compile is over. The local arena gets de-allocated at the end of its scope.
2979 ResourceArea local_arena(mtCompiler);
2980 Unique_Node_List worklist(&local_arena);
2981 Unique_Node_List worklist_revisit(&local_arena);
2982 DEBUG_ONLY(Unique_Node_List worklist_verify(&local_arena);)
2983
2984 // Push root onto worklist
2985 worklist.push(C->root());
2986
2987 assert(_root_and_safepoints.size() == 0, "must be empty (unused)");
2988 _root_and_safepoints.push(C->root());
2989
2990 // This is the meat of CCP: pull from worklist; compute new value; push changes out.
2991
2992 // Do the first round. Since all initial types are TOP, this will visit all alive nodes.
2993 while (worklist.size() != 0) {
2994 Node* n = fetch_next_node(worklist);
2995 DEBUG_ONLY(worklist_verify.push(n);)
2996 if (needs_revisit(n)) {
2997 worklist_revisit.push(n);
2998 }
2999 if (n->is_SafePoint()) {
3000 // Make sure safepoints are processed by PhaseCCP::transform even if they are
3001 // not reachable from the bottom. Otherwise, infinite loops would be removed.
3002 _root_and_safepoints.push(n);
3003 }
3004 analyze_step(worklist, n);
3005 }
3006
3007   // Run more rounds to catch type updates far away in the graph.
3008   // At the end of each round, revisit the nodes collected above that might be able to refine their types.
3009   // If processing them pushes new work onto the worklist, start another round.
3010 do {
3011 while (worklist.size() != 0) {
3012 Node* n = fetch_next_node(worklist);
3013 analyze_step(worklist, n);
3014 }
3015 for (uint t = 0; t < worklist_revisit.size(); t++) {
3016 Node* n = worklist_revisit.at(t);
3017 analyze_step(worklist, n);
3018 }
3019 } while (worklist.size() != 0);
3020
3021 DEBUG_ONLY(verify_analyze(worklist_verify);)
3022 }
3023
3024 void PhaseCCP::analyze_step(Unique_Node_List& worklist, Node* n) {
3025 const Type* new_type = n->Value(this);
3026 if (new_type != type(n)) {
3027 DEBUG_ONLY(verify_type(n, new_type, type(n));)
3028 dump_type_and_node(n, new_type);
3029 set_type(n, new_type);
3030 push_child_nodes_to_worklist(worklist, n);
3031 }
3032 if (KillPathsReachableByDeadTypeNode && n->is_Type() && new_type == Type::TOP) {
3033 // Keep track of Type nodes to kill CFG paths that use Type
3034 // nodes that become dead.
3035 _maybe_top_type_nodes.push(n);
3036 }
3037 }
3038
3039 // Some nodes can refine their types due to type change somewhere deep
3040 // in the graph. We will need to revisit them before claiming convergence.
3041 // Add nodes here if particular *Node::Value is doing deep graph traversals
3042 // not handled by PhaseCCP::push_more_uses().
3043 bool PhaseCCP::needs_revisit(Node* n) const {
3044   // LoadNode::Value() performs deep traversals, but a Load is not notified of changes far away in the graph.
3045 if (n->is_Load()) {
3046 return true;
3047 }
3048   // CmpPNode::Value() performs deep traversals if it compares oop pointers, but a CmpP is not notified of changes far away.
3049 if (n->Opcode() == Op_CmpP && type(n->in(1))->isa_oopptr() && type(n->in(2))->isa_oopptr()) {
3050 return true;
3051 }
3052 return false;
3053 }
3054
3055 #ifdef ASSERT
3056 // For every node n on verify list, check if type(n) == n->Value()
3057 // Note that for CCP, non-convergence can lead to an unsound analysis and mis-compilation.
3058 // Therefore, we are verifying Value convergence strictly.
3059 void PhaseCCP::verify_analyze(Unique_Node_List& worklist_verify) {
3060 while (worklist_verify.size()) {
3061 Node* n = worklist_verify.pop();
3062
3063 // An assert in verify_Value_for means that PhaseCCP is not at fixpoint
3064 // and that the analysis result may be unsound.
3065 // If this happens, check why the reported nodes were not processed again in CCP.
3066 // We should either make sure that these nodes are properly added back to the CCP worklist
3067 // in PhaseCCP::push_child_nodes_to_worklist() to update their type in the same round,
3068 // or that they are added in PhaseCCP::needs_revisit() so that analysis revisits
3069 // them at the end of the round.
3070 verify_Value_for(n, true);
3071 }
3072 }
3073 #endif
3074
3075 // Fetch next node from worklist to be examined in this iteration.
3076 Node* PhaseCCP::fetch_next_node(Unique_Node_List& worklist) {
3077 if (StressCCP) {
3078 return worklist.remove(C->random() % worklist.size());
3079 } else {
3080 return worklist.pop();
3081 }
3082 }
3083
3084 #ifndef PRODUCT
3085 void PhaseCCP::dump_type_and_node(const Node* n, const Type* t) {
3086 if (TracePhaseCCP) {
3087 t->dump();
3088 do {
3089 tty->print("\t");
3090 } while (tty->position() < 16);
3091 n->dump();
3092 }
3093 }
3094 #endif
3095
3096 bool PhaseCCP::not_bottom_type(Node* n) const {
3097 return n->bottom_type() != type(n);
3098 }
3099
3100 // We need to propagate the type change of 'n' to all its uses. Depending on the kind of node, additional nodes
3101 // (grandchildren or even further down) need to be revisited as their types could also be improved as a result
3102 // of the new type of 'n'. Push these nodes to the worklist.
3103 void PhaseCCP::push_child_nodes_to_worklist(Unique_Node_List& worklist, Node* n) const {
3104 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3105 Node* use = n->fast_out(i);
3106 push_if_not_bottom_type(worklist, use);
3107 push_more_uses(worklist, n, use);
3108 }
3109 }
3110
3111 void PhaseCCP::push_if_not_bottom_type(Unique_Node_List& worklist, Node* n) const {
3112 if (not_bottom_type(n)) {
3113 worklist.push(n);
3114 }
3115 }
3116
3117 // For some nodes, we need to propagate the type change to grandchildren or even further down.
3118 // Add them back to the worklist.
3119 void PhaseCCP::push_more_uses(Unique_Node_List& worklist, Node* parent, const Node* use) const {
3120 push_phis(worklist, use);
3121 push_catch(worklist, use);
3122 push_cmpu(worklist, use);
3123 push_counted_loop_phi(worklist, parent, use);
3124 push_cast(worklist, use);
3125 push_loadp(worklist, use);
3126 push_and(worklist, parent, use);
3127 push_cast_ii(worklist, parent, use);
3128 push_opaque_zero_trip_guard(worklist, use);
3129 push_bool_with_cmpu_and_mask(worklist, use);
3130 }
3131
3132
3133 // We must recheck Phis too if use is a Region.
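// For illustration, a sketch (names are placeholders; 'n' is the control input whose type just changed):
//
//      n   c2
//       \ /
//      Region (use)
//        |
//       Phi (Region, v1, v2)   <- recheck, see PhiNode::Value()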
3134 void PhaseCCP::push_phis(Unique_Node_List& worklist, const Node* use) const {
3135 if (use->is_Region()) {
3136 add_users_to_worklist_if(worklist, use, [&](Node* u) {
3137 return not_bottom_type(u);
3138 });
3139 }
3140 }
3141
3142 // If we changed the type of the receiver of a call, we need to revisit the Catch node following the call. It's looking
3143 // for a non-null receiver to know when to enable the regular fall-through path in addition to the NullPointerException
3144 // path. The same is true if the type of a ValidLengthTest input to an AllocateArrayNode changes.
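// For illustration, a sketch of the receiver case (names are placeholders):
//
//      receiver (type just changed)
//          |
//        Call (use)
//          |
//        Proj (TypeFunc::Control)
//          |
//        Catch   <- revisit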
3145 void PhaseCCP::push_catch(Unique_Node_List& worklist, const Node* use) {
3146 if (use->is_Call()) {
3147 for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
3148 Node* proj = use->fast_out(i);
3149 if (proj->is_Proj() && proj->as_Proj()->_con == TypeFunc::Control) {
3150 Node* catch_node = proj->find_out_with(Op_Catch);
3151 if (catch_node != nullptr) {
3152 worklist.push(catch_node);
3153 }
3154 }
3155 }
3156 }
3157 }
3158
3159 // CmpU nodes can get their type information from two nodes up in the graph (instead of from the nodes immediately
3160 // above). Make sure they are added to the worklist if the nodes they depend on are updated, since they could otherwise
3161 // be missed and end up with a stale type.
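// For illustration, a sketch of the shape (names are placeholders; 'n' is the node whose type just changed):
//
//       n   y
//        \ /
//    AddI/SubI (use)    z
//           \          /
//         CmpU (or CmpU3)   <- revisit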
3162 void PhaseCCP::push_cmpu(Unique_Node_List& worklist, const Node* use) const {
3163 uint use_op = use->Opcode();
3164 if (use_op == Op_AddI || use_op == Op_SubI) {
3165     // This AddI/SubI may feed a CmpU or CmpU3 which might need the new type information from node n.
3166 add_users_to_worklist_if(worklist, use, [&](Node* u) {
3167 uint op = u->Opcode();
3168 return (op == Op_CmpU || op == Op_CmpU3) && not_bottom_type(u);
3169 });
3170 }
3171 }
3172
3173 // Look for the following shape, which can be optimized by BoolNode::Value_cmpu_and_mask() (i.e. corresponds to case
3174 // (1b): "(m & x) <u (m + 1)").
3175 // If any of the inputs on the level (%%) change, we need to revisit Bool because we could have prematurely found that
3176 // the Bool is constant (i.e. case (1b) can be applied) which could become invalid with new type information during CCP.
3177 //
3178 // m x m 1 (%%)
3179 // \ / \ /
3180 // AndI AddI
3181 // \ /
3182 // CmpU
3183 // |
3184 // Bool
3185 //
3186 void PhaseCCP::push_bool_with_cmpu_and_mask(Unique_Node_List& worklist, const Node* use) const {
3187 uint use_op = use->Opcode();
3188 if (use_op != Op_AndI && (use_op != Op_AddI || use->in(2)->find_int_con(0) != 1)) {
3189 // Not "m & x" or "m + 1"
3190 return;
3191 }
3192 for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
3193 Node* cmpu = use->fast_out(i);
3194 if (cmpu->Opcode() == Op_CmpU) {
3195 push_bool_matching_case1b(worklist, cmpu);
3196 }
3197 }
3198 }
3199
3200 // Push any Bool below 'cmpu' that matches case (1b) of BoolNode::Value_cmpu_and_mask().
3201 void PhaseCCP::push_bool_matching_case1b(Unique_Node_List& worklist, const Node* cmpu) const {
3202 assert(cmpu->Opcode() == Op_CmpU, "must be");
3203 for (DUIterator_Fast imax, i = cmpu->fast_outs(imax); i < imax; i++) {
3204 Node* bol = cmpu->fast_out(i);
3205 if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::lt) {
3206 // Not a Bool with "<u"
3207 continue;
3208 }
3209 Node* andI = cmpu->in(1);
3210 Node* addI = cmpu->in(2);
3211 if (andI->Opcode() != Op_AndI || addI->Opcode() != Op_AddI || addI->in(2)->find_int_con(0) != 1) {
3212 // Not "m & x" and "m + 1"
3213 continue;
3214 }
3215
3216 Node* m = addI->in(1);
3217 if (m == andI->in(1) || m == andI->in(2)) {
3218 // Is "m" shared? Matched (1b) and thus we revisit Bool.
3219 push_if_not_bottom_type(worklist, bol);
3220 }
3221 }
3222 }
3223
3224 // If n is used in a counted loop exit condition, then the type of the counted loop's Phi depends on the type of 'n'.
3225 // See PhiNode::Value().
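// For illustration, a sketch of the typical loop exit shape (names are placeholders):
//
//      iv Phi    parent (e.g. the loop limit)
//          \      /
//       CmpI/CmpL (use)
//            |
//           Bool
//            |
//      CountedLoopEnd
//
// The type of the induction variable Phi depends on the limit, so the Phi is pushed again.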
3226 void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use) {
3227 uint use_op = use->Opcode();
3228 if (use_op == Op_CmpI || use_op == Op_CmpL) {
3229 PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent);
3230 if (phi != nullptr) {
3231 worklist.push(phi);
3232 }
3233 }
3234 }
3235
3236 // TODO 8350865 Still needed? Yes, I think this is from PhaseMacroExpand::expand_mh_intrinsic_return
3237 void PhaseCCP::push_cast(Unique_Node_List& worklist, const Node* use) {
3238 uint use_op = use->Opcode();
3239 if (use_op == Op_CastP2X) {
3240 for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
3241 Node* u = use->fast_out(i2);
3242 if (u->Opcode() == Op_AndX) {
3243 worklist.push(u);
3244 }
3245 }
3246 }
3247 }
3248
3249 // Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
3250 // See LoadNode::Value().
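// For illustration, a sketch of the two-load chain (names are placeholders):
//
//      n
//      |
//    LoadP (use, rawptr)    e.g. loads the Klass*
//      |
//    LoadP (instptr)        loads the java mirror; revisit it (and any load barriers behind it)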
3251 void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
3252 BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
3253 bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();
3254
3255 if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
3256 for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
3257 Node* loadp = use->fast_out(i);
3258 const Type* ut = loadp->bottom_type();
3259 if (loadp->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(loadp)) {
3260 if (has_load_barrier_nodes) {
3261 // Search for load barriers behind the load
3262 push_load_barrier(worklist, barrier_set, loadp);
3263 }
3264 worklist.push(loadp);
3265 }
3266 }
3267 }
3268 }
3269
3270 void PhaseCCP::push_load_barrier(Unique_Node_List& worklist, const BarrierSetC2* barrier_set, const Node* use) {
3271 add_users_to_worklist_if(worklist, use, [&](Node* u) {
3272 return barrier_set->is_gc_barrier_node(u);
3273 });
3274 }
3275
3276 // AndI/L::Value() optimizes patterns similar to (v << 2) & 3, or CON & 3, to zero if the operands are bitwise disjoint.
3277 // Add the AndI/L nodes back to the worklist to re-apply Value() in case the parent is now a constant or the shift
3278 // value has changed.
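// For illustration (a sketch; 'v' is a placeholder): once the shift amount is known to be the constant 2,
// (v << 2) & 3 is 0 because the low two bits of (v << 2) are always zero, i.e. the two And operands are
// bitwise disjoint. The And may be separated from the changed node by ConstraintCasts and/or a ConvI2L:
//
//      parent (shift amount)
//         |
//      LShiftI/L (use)
//         |
//      (ConstraintCast | ConvI2L)*
//         |
//      AndI/L   <- revisit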
3279 void PhaseCCP::push_and(Unique_Node_List& worklist, const Node* parent, const Node* use) const {
3280 const TypeInteger* parent_type = type(parent)->isa_integer(type(parent)->basic_type());
3281 uint use_op = use->Opcode();
3282 if (
3283 // Pattern: parent (now constant) -> (ConstraintCast | ConvI2L)* -> And
3284 (parent_type != nullptr && parent_type->is_con()) ||
3285 // Pattern: parent -> LShift (use) -> (ConstraintCast | ConvI2L)* -> And
3286 ((use_op == Op_LShiftI || use_op == Op_LShiftL) && use->in(2) == parent)) {
3287
3288 auto push_and_uses_to_worklist = [&](Node* n) {
3289 uint opc = n->Opcode();
3290 if (opc == Op_AndI || opc == Op_AndL) {
3291 push_if_not_bottom_type(worklist, n);
3292 }
3293 };
3294 auto is_boundary = [](Node* n) {
3295 return !(n->is_ConstraintCast() || n->Opcode() == Op_ConvI2L);
3296 };
3297 use->visit_uses(push_and_uses_to_worklist, is_boundary);
3298 }
3299 }
3300
3301 // CastII::Value() optimizes CmpI/If patterns if the right input of the CmpI has a constant type. If the CastII input is
3302 // the same node as the left input into the CmpI node, the type of the CastII node can be improved accordingly. Add the
3303 // CastII node back to the worklist to re-apply Value() to either not miss this optimization or to undo it because it
3304 // cannot be applied anymore. We could have optimized the type of the CastII before but now the type of the right input
3305 // of the CmpI (i.e. 'parent') is no longer constant. The type of the CastII must be widened in this case.
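// For illustration, a sketch of the CmpI/If pattern (names are placeholders):
//
//      other_cmp_input    parent
//            |    \         /
//            |     CmpI (use)
//            |        |
//            |       Bool
//            |        |
//            |        If
//            |       /
//          CastII            <- revisit (its data input is also the left input of the CmpI)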
3306 void PhaseCCP::push_cast_ii(Unique_Node_List& worklist, const Node* parent, const Node* use) const {
3307 if (use->Opcode() == Op_CmpI && use->in(2) == parent) {
3308 Node* other_cmp_input = use->in(1);
3309 add_users_to_worklist_if(worklist, other_cmp_input, [&](Node* u) {
3310 return u->is_CastII() && not_bottom_type(u);
3311 });
3312 }
3313 }
3314
3315 void PhaseCCP::push_opaque_zero_trip_guard(Unique_Node_List& worklist, const Node* use) const {
3316 if (use->Opcode() == Op_OpaqueZeroTripGuard) {
3317 push_if_not_bottom_type(worklist, use->unique_out());
3318 }
3319 }
3320
3321 //------------------------------do_transform-----------------------------------
3322 // Top level driver for the recursive transformer
3323 void PhaseCCP::do_transform() {
3324   // Transform the whole graph, starting at the root.
3325 C->set_root( transform(C->root())->as_Root() );
3326 assert( C->top(), "missing TOP node" );
3327 assert( C->root(), "missing root" );
3328 }
3329
3330 //------------------------------transform--------------------------------------
3331 // Walk the graph starting at the root (and all safepoints), apply transform_once to every
3332 // reachable node, and disconnect the nodes that were never reached.
3333 Node *PhaseCCP::transform( Node *n ) {
3334 assert(n->is_Root(), "traversal must start at root");
3335 assert(_root_and_safepoints.member(n), "root (n) must be in list");
3336
3337 ResourceMark rm;
3338 // Map: old node idx -> node after CCP (or nullptr if not yet transformed or useless).
3339 Node_List node_map;
3340 // Pre-allocate to avoid frequent realloc
3341 GrowableArray <Node *> transform_stack(C->live_nodes() >> 1);
3342 // track all visited nodes, so that we can remove the complement
3343 Unique_Node_List useful;
3344
3345 if (KillPathsReachableByDeadTypeNode) {
3346 for (uint i = 0; i < _maybe_top_type_nodes.size(); ++i) {
3347 Node* type_node = _maybe_top_type_nodes.at(i);
3348 if (type(type_node) == Type::TOP) {
3349 ResourceMark rm;
3350 type_node->as_Type()->make_paths_from_here_dead(this, nullptr, "ccp");
3351 }
3352 }
3353 } else {
3354 assert(_maybe_top_type_nodes.size() == 0, "we don't need type nodes");
3355 }
3356
3357 // Initialize the traversal.
3358 // This CCP pass may prove that no exit test for a loop ever succeeds (i.e. the loop is infinite). In that case,
3359 // the logic below doesn't follow any path from Root to the loop body: there's at least one such path but it's proven
3360 // never taken (its type is TOP). As a consequence the node on the exit path that's input to Root (let's call it n) is
3361 // replaced by the top node and the inputs of that node n are not enqueued for further processing. If CCP only works
3362 // through the graph from Root, this causes the loop body to never be processed here even when it is not dead (that
3363 // is, still reachable from Root by following uses). To prevent that issue, transform() starts walking the graph from Root
3364 // and all safepoints.
3365 for (uint i = 0; i < _root_and_safepoints.size(); ++i) {
3366 Node* nn = _root_and_safepoints.at(i);
3367 Node* new_node = node_map[nn->_idx];
3368 assert(new_node == nullptr, "");
3369 new_node = transform_once(nn); // Check for constant
3370 node_map.map(nn->_idx, new_node); // Flag as having been cloned
3371 transform_stack.push(new_node); // Process children of cloned node
3372 useful.push(new_node);
3373 }
3374
3375 while (transform_stack.is_nonempty()) {
3376 Node* clone = transform_stack.pop();
3377 uint cnt = clone->req();
3378 for( uint i = 0; i < cnt; i++ ) { // For all inputs do
3379 Node *input = clone->in(i);
3380 if( input != nullptr ) { // Ignore nulls
3381 Node *new_input = node_map[input->_idx]; // Check for cloned input node
3382 if( new_input == nullptr ) {
3383 new_input = transform_once(input); // Check for constant
3384 node_map.map( input->_idx, new_input );// Flag as having been cloned
3385 transform_stack.push(new_input); // Process children of cloned node
3386 useful.push(new_input);
3387 }
3388 assert( new_input == clone->in(i), "insanity check");
3389 }
3390 }
3391 }
3392
3393 // The above transformation might lead to subgraphs becoming unreachable from the
3394 // bottom while still being reachable from the top. As a result, nodes in that
3395 // subgraph are not transformed and their bottom types are not updated, leading to
3396 // an inconsistency between bottom_type() and type(). In rare cases, LoadNodes in
3397 // such a subgraph, might be re-enqueued for IGVN indefinitely by MemNode::Ideal_common
3398 // because their address type is inconsistent. Therefore, we aggressively remove
3399 // all useless nodes here even before PhaseIdealLoop::build_loop_late gets a chance
3400 // to remove them anyway.
3401 if (C->cached_top_node()) {
3402 useful.push(C->cached_top_node());
3403 }
3404 C->update_dead_node_list(useful);
3405 remove_useless_nodes(useful.member_set());
3406 _worklist.remove_useless_nodes(useful.member_set());
3407 C->disconnect_useless_nodes(useful, _worklist, &_root_and_safepoints);
3408
3409 Node* new_root = node_map[n->_idx];
3410 assert(new_root->is_Root(), "transformed root node must be a root node");
3411 return new_root;
3412 }
3413
3414 //------------------------------transform_once---------------------------------
3415 // For PhaseCCP, transformation is IDENTITY unless Node computed a constant.
3416 Node *PhaseCCP::transform_once( Node *n ) {
3417 const Type *t = type(n);
3418 // Constant? Use constant Node instead
3419 if( t->singleton() ) {
3420 Node *nn = n; // Default is to return the original constant
3421 if( t == Type::TOP ) {
3422 // cache my top node on the Compile instance
3423 if( C->cached_top_node() == nullptr || C->cached_top_node()->in(0) == nullptr ) {
3424 C->set_cached_top_node(ConNode::make(Type::TOP));
3425 set_type(C->top(), Type::TOP);
3426 }
3427 nn = C->top();
3428 }
3429 if( !n->is_Con() ) {
3430 if( t != Type::TOP ) {
3431 nn = makecon(t); // ConNode::make(t);
3432 NOT_PRODUCT( inc_constants(); )
3433 } else if( n->is_Region() ) { // Unreachable region
3434 // Note: nn == C->top()
3435 n->set_req(0, nullptr); // Cut selfreference
3436 bool progress = true;
3437 uint max = n->outcnt();
3438 DUIterator i;
3439 while (progress) {
3440 progress = false;
3441         // Eagerly remove dead phis to avoid creating phi copies.
3442 for (i = n->outs(); n->has_out(i); i++) {
3443 Node* m = n->out(i);
3444 if (m->is_Phi()) {
3445 assert(type(m) == Type::TOP, "Unreachable region should not have live phis.");
3446 replace_node(m, nn);
3447 if (max != n->outcnt()) {
3448 progress = true;
3449 i = n->refresh_out_pos(i);
3450 max = n->outcnt();
3451 }
3452 }
3453 }
3454 }
3455 }
3456 replace_node(n,nn); // Update DefUse edges for new constant
3457 }
3458 return nn;
3459 }
3460
3461   // If n is a TypeNode, capture any more-precise type permanently into the Node
3462 if (t != n->bottom_type()) {
3463 hash_delete(n); // changing bottom type may force a rehash
3464 n->raise_bottom_type(t);
3465 _worklist.push(n); // n re-enters the hash table via the worklist
3466 add_users_to_worklist(n); // if ideal or identity optimizations depend on the input type, users need to be notified
3467 }
3468
3469 // TEMPORARY fix to ensure that 2nd GVN pass eliminates null checks
3470 switch( n->Opcode() ) {
3471 case Op_CallStaticJava: // Give post-parse call devirtualization a chance
3472 case Op_CallDynamicJava:
3473 case Op_FastLock: // Revisit FastLocks for lock coarsening
3474 case Op_If:
3475 case Op_CountedLoopEnd:
3476 case Op_Region:
3477 case Op_Loop:
3478 case Op_CountedLoop:
3479 case Op_Conv2B:
3480 case Op_Opaque1:
3481 _worklist.push(n);
3482 break;
3483 default:
3484 break;
3485 }
3486
3487 return n;
3488 }
3489
3490 //---------------------------------saturate------------------------------------
3491 const Type* PhaseCCP::saturate(const Type* new_type, const Type* old_type,
3492 const Type* limit_type) const {
3493 const Type* wide_type = new_type->widen(old_type, limit_type);
3494 if (wide_type != new_type) { // did we widen?
3495 // If so, we may have widened beyond the limit type. Clip it back down.
3496 new_type = wide_type->filter(limit_type);
3497 }
3498 return new_type;
3499 }
3500
3501 //------------------------------print_statistics-------------------------------
3502 #ifndef PRODUCT
3503 void PhaseCCP::print_statistics() {
3504   tty->print_cr("CCP: %d invocations, %d constants found", _total_invokes, _total_constants);
3505 }
3506 #endif
3507
3508
3509 //=============================================================================
3510 #ifndef PRODUCT
3511 uint PhasePeephole::_total_peepholes = 0;
3512 #endif
3513 //------------------------------PhasePeephole----------------------------------
3514 // Peephole optimization pass over the machine instructions of each basic block, run after register allocation
3515 PhasePeephole::PhasePeephole( PhaseRegAlloc *regalloc, PhaseCFG &cfg )
3516 : PhaseTransform(Peephole), _regalloc(regalloc), _cfg(cfg) {
3517 NOT_PRODUCT( clear_peepholes(); )
3518 }
3519
3520 #ifndef PRODUCT
3521 //------------------------------~PhasePeephole---------------------------------
3522 PhasePeephole::~PhasePeephole() {
3523 _total_peepholes += count_peepholes();
3524 }
3525 #endif
3526
3527 //------------------------------transform--------------------------------------
3528 Node *PhasePeephole::transform( Node *n ) {
3529 ShouldNotCallThis();
3530 return nullptr;
3531 }
3532
3533 //------------------------------do_transform-----------------------------------
3534 void PhasePeephole::do_transform() {
3535 bool method_name_not_printed = true;
3536
3537 // Examine each basic block
3538 for (uint block_number = 1; block_number < _cfg.number_of_blocks(); ++block_number) {
3539 Block* block = _cfg.get_block(block_number);
3540 bool block_not_printed = true;
3541
3542 for (bool progress = true; progress;) {
3543 progress = false;
3544 // block->end_idx() not valid after PhaseRegAlloc
3545 uint end_index = block->number_of_nodes();
3546 for( uint instruction_index = end_index - 1; instruction_index > 0; --instruction_index ) {
3547 Node *n = block->get_node(instruction_index);
3548 if( n->is_Mach() ) {
3549 MachNode *m = n->as_Mach();
3550 // check for peephole opportunities
3551 int result = m->peephole(block, instruction_index, &_cfg, _regalloc);
3552 if( result != -1 ) {
3553 #ifndef PRODUCT
3554 if( PrintOptoPeephole ) {
3555 // Print method, first time only
3556 if( C->method() && method_name_not_printed ) {
3557 C->method()->print_short_name(); tty->cr();
3558 method_name_not_printed = false;
3559 }
3560 // Print this block
3561 if( Verbose && block_not_printed) {
3562 tty->print_cr("in block");
3563 block->dump();
3564 block_not_printed = false;
3565 }
3566 // Print the peephole number
3567 tty->print_cr("peephole number: %d", result);
3568 }
3569 inc_peepholes();
3570 #endif
3571 // Set progress, start again
3572 progress = true;
3573 break;
3574 }
3575 }
3576 }
3577 }
3578 }
3579 }
3580
3581 //------------------------------print_statistics-------------------------------
3582 #ifndef PRODUCT
3583 void PhasePeephole::print_statistics() {
3584 tty->print_cr("Peephole: peephole rules applied: %d", _total_peepholes);
3585 }
3586 #endif
3587
3588
3589 //=============================================================================
3590 //------------------------------set_req_X--------------------------------------
3591 void Node::set_req_X( uint i, Node *n, PhaseIterGVN *igvn ) {
3592   assert( is_not_dead(n), "cannot use dead node");
3593 #ifdef ASSERT
3594 if (igvn->hash_find(this) == this) {
3595 tty->print_cr("Need to remove from hash before changing edges");
3596 this->dump(1);
3597 tty->print_cr("Set at i = %d", i);
3598 n->dump();
3599 assert(false, "Need to remove from hash before changing edges");
3600 }
3601 #endif
3602 Node *old = in(i);
3603 set_req(i, n);
3604
3605 // old goes dead?
3606 if( old ) {
3607 switch (old->outcnt()) {
3608 case 0:
3609       // Put it on the worklist to kill later. We do not kill it now because a
3610       // recursive kill could delete the current node (this) if a dead loop exists.
3611 if (!old->is_top())
3612 igvn->_worklist.push( old );
3613 break;
3614 case 1:
3615 if( old->is_Store() || old->has_special_unique_user() )
3616 igvn->add_users_to_worklist( old );
3617 break;
3618 case 2:
3619 if( old->is_Store() )
3620 igvn->add_users_to_worklist( old );
3621 if( old->Opcode() == Op_Region )
3622 igvn->_worklist.push(old);
3623 break;
3624 case 3:
3625 if( old->Opcode() == Op_Region ) {
3626 igvn->_worklist.push(old);
3627 igvn->add_users_to_worklist( old );
3628 }
3629 break;
3630 default:
3631 break;
3632 }
3633
3634 BarrierSet::barrier_set()->barrier_set_c2()->enqueue_useful_gc_barrier(igvn, old);
3635 }
3636 }
3637
3638 void Node::set_req_X(uint i, Node *n, PhaseGVN *gvn) {
3639 PhaseIterGVN* igvn = gvn->is_IterGVN();
3640 if (igvn == nullptr) {
3641 set_req(i, n);
3642 return;
3643 }
3644 set_req_X(i, n, igvn);
3645 }
3646
3647 //-------------------------------replace_by-----------------------------------
3648 // Using def-use info, replace one node with another. Follow the def-use info
3649 // to all users of the OLD node. Then make all uses point to the NEW node.
3650 void Node::replace_by(Node *new_node) {
3651 assert(!is_top(), "top node has no DU info");
3652 for (DUIterator_Last imin, i = last_outs(imin); i >= imin; ) {
3653 Node* use = last_out(i);
3654 uint uses_found = 0;
3655 for (uint j = 0; j < use->len(); j++) {
3656 if (use->in(j) == this) {
3657 if (j < use->req())
3658 use->set_req(j, new_node);
3659 else use->set_prec(j, new_node);
3660 uses_found++;
3661 }
3662 }
3663 i -= uses_found; // we deleted 1 or more copies of this edge
3664 }
3665 }
3666
3667 //=============================================================================
3668 //-----------------------------------------------------------------------------
3669 void Type_Array::grow( uint i ) {
3670 assert(_a == Compile::current()->comp_arena(), "Should be allocated in comp_arena");
3671 if( !_max ) {
3672 _max = 1;
3673 _types = (const Type**)_a->Amalloc( _max * sizeof(Type*) );
3674 _types[0] = nullptr;
3675 }
3676 uint old = _max;
3677 _max = next_power_of_2(i);
3678 _types = (const Type**)_a->Arealloc( _types, old*sizeof(Type*),_max*sizeof(Type*));
3679 memset( &_types[old], 0, (_max-old)*sizeof(Type*) );
3680 }
3681
3682 //------------------------------dump-------------------------------------------
3683 #ifndef PRODUCT
3684 void Type_Array::dump() const {
3685 uint max = Size();
3686 for( uint i = 0; i < max; i++ ) {
3687 if( _types[i] != nullptr ) {
3688 tty->print(" %d\t== ", i); _types[i]->dump(); tty->cr();
3689 }
3690 }
3691 }
3692 #endif