/*
 * Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.hpp"
#include "memory/resourceArea.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/compile.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/output.hpp"
#include "opto/phase.hpp"
#include "opto/regalloc.hpp"
#include "opto/rootnode.hpp"
#include "utilities/align.hpp"

// The functions in this file build OopMaps after all scheduling is done.
//
// OopMaps contain a list of all registers and stack-slots containing oops (so
// they can be updated by GC).  OopMaps also contain a list of derived-pointer
// base-pointer pairs.  When the base is moved, the derived pointer moves to
// follow it.  Finally, any registers holding callee-save values are also
// recorded.  These might contain oops, but only the caller knows.
//
// BuildOopMaps implements a simple forward reaching-defs solution.  At each
// GC point we'll have the reaching-def Nodes.  If the reaching Nodes are
// typed as pointers (no offset), then they are oops.  Pointers+offsets are
// derived pointers, and bases can be found from them.  Finally, we'll also
// track reaching callee-save values.  Note that a copy of a callee-save value
// "kills" its source, so that only 1 copy of a callee-save value is alive at
// a time.
//
// We run a simple bitvector liveness pass to help trim out dead oops.  Due to
// irreducible loops, we can have a reaching def of an oop that only reaches
// along one path, with no way to know whether it is valid on the other path.
// The bitvectors are quite dense and the liveness pass is fast.
//
// At GC points, we consult this information to build OopMaps.  All reaching
// defs typed as oops are added to the OopMap.  Only 1 instance of a
// callee-save register can be recorded.  For derived pointers, we'll have to
// find and record the register holding the base.
//
// The reaching-defs computation is a simple 1-pass worklist approach.  I tried
// a clever breadth-first approach but it was worse (showed O(n^2) in the
// pick-next-block code).
//
// The relevant data is kept in a struct of arrays (it could just as well be
// an array of structs, but the struct-of-arrays is generally a little more
// efficient).  The arrays are indexed by register number (including
// stack-slots as registers) and so are bounded by 200 to 300 elements in
// practice.  One array will map to a reaching def Node (or null for
// conflict/dead).  The other array will map to a callee-saved register or
// OptoReg::Bad for not-callee-saved.


// Structure to pass around
struct OopFlow : public ArenaObj {
  short *_callees;              // Array mapping register to callee-saved
  Node **_defs;                 // array mapping register to reaching def
                                // or null if dead/conflict
  // OopFlow structs, when not being actively modified, describe the _end_ of
  // this block.
  Block *_b;                    // Block for this struct
  OopFlow *_next;               // Next free OopFlow
                                // or null if none (end of free list)
  Compile* C;

  OopFlow( short *callees, Node **defs, Compile* c ) : _callees(callees), _defs(defs),
    _b(nullptr), _next(nullptr), C(c) { }

  // Given reaching-defs for this block start, compute it for this block end
  void compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehash );

  // Merge these two OopFlows into the 'this' pointer.
  void merge( OopFlow *flow, int max_reg );

  // Copy a 'flow' over an existing flow
  void clone( OopFlow *flow, int max_size);

  // Make a new OopFlow from scratch
  static OopFlow *make( Arena *A, int max_size, Compile* C );

  // Build an oopmap from the current flow info
  OopMap *build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, int* live );
};

// Given reaching-defs for this block start, compute it for this block end
void OopFlow::compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehash ) {

  for( uint i=0; i<_b->number_of_nodes(); i++ ) {
    Node *n = _b->get_node(i);

    if( n->jvms() ) {           // Build an OopMap here?
      JVMState *jvms = n->jvms();
      // no map needed for leaf calls
      if( n->is_MachSafePoint() && !n->is_MachCallLeaf() ) {
        int *live = (int*) (*safehash)[n];
        assert( live, "must find live" );
        n->as_MachSafePoint()->set_oop_map( build_oop_map(n,max_reg,regalloc, live) );
      }
    }

    // Assign new reaching def's.
    // Note that I padded the _defs and _callees arrays so it's legal
    // to index at _defs[OptoReg::Bad].
    OptoReg::Name first = regalloc->get_reg_first(n);
    OptoReg::Name second = regalloc->get_reg_second(n);
    _defs[first] = n;
    _defs[second] = n;

    // Pass callee-save info around copies
    int idx = n->is_Copy();
    if( idx ) {                 // Copies move callee-save info
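      // Read the source's callee-save entries before clearing them, so a copy
      // whose source and destination registers overlap still carries the
      // callee-save info over intact.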
      OptoReg::Name old_first = regalloc->get_reg_first(n->in(idx));
      OptoReg::Name old_second = regalloc->get_reg_second(n->in(idx));
      int tmp_first = _callees[old_first];
      int tmp_second = _callees[old_second];
      _callees[old_first] = OptoReg::Bad; // callee-save is moved, dead in old location
      _callees[old_second] = OptoReg::Bad;
      _callees[first] = tmp_first;
      _callees[second] = tmp_second;
    } else if( n->is_Phi() ) {  // Phis do not mod callee-saves
      assert( _callees[first] == _callees[regalloc->get_reg_first(n->in(1))], "" );
      assert( _callees[second] == _callees[regalloc->get_reg_second(n->in(1))], "" );
      assert( _callees[first] == _callees[regalloc->get_reg_first(n->in(n->req()-1))], "" );
      assert( _callees[second] == _callees[regalloc->get_reg_second(n->in(n->req()-1))], "" );
    } else {
      _callees[first] = OptoReg::Bad; // No longer holding a callee-save value
      _callees[second] = OptoReg::Bad;

      // Find base case for callee saves
      if( n->is_Proj() && n->in(0)->is_Start() ) {
        if( OptoReg::is_reg(first) &&
            regalloc->_matcher.is_save_on_entry(first) )
          _callees[first] = first;
        if( OptoReg::is_reg(second) &&
            regalloc->_matcher.is_save_on_entry(second) )
          _callees[second] = second;
      }
    }
  }
}

// Merge the given flow into the 'this' flow
void OopFlow::merge( OopFlow *flow, int max_reg ) {
  assert( _b == nullptr, "merging into a happy flow" );
  assert( flow->_b, "this flow is still alive" );
  assert( flow != this, "no self flow" );

  // Do the merge.  If there are any differences, drop to 'bottom', which
  // is OptoReg::Bad for callee-saves and null for reaching defs.
  for( int i=0; i<max_reg; i++ ) {
    // Merge the callee-save's
    if( _callees[i] != flow->_callees[i] )
      _callees[i] = OptoReg::Bad;
    // Merge the reaching defs
    if( _defs[i] != flow->_defs[i] )
      _defs[i] = nullptr;
  }

}

void OopFlow::clone( OopFlow *flow, int max_size ) {
  _b = flow->_b;
  memcpy( _callees, flow->_callees, sizeof(short)*max_size);
  memcpy( _defs   , flow->_defs   , sizeof(Node*)*max_size);
}

OopFlow *OopFlow::make( Arena *A, int max_size, Compile* C ) {
  short *callees = NEW_ARENA_ARRAY(A,short,max_size+1);
  Node **defs    = NEW_ARENA_ARRAY(A,Node*,max_size+1);
  debug_only( memset(defs,0,(max_size+1)*sizeof(Node*)) );
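  // Bump both array bases by one element so that indexing at OptoReg::Bad (-1)
  // lands on the padding slot rather than before the allocation.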
  OopFlow *flow = new (A) OopFlow(callees+1, defs+1, C);
  assert( &flow->_callees[OptoReg::Bad] == callees, "Ok to index at OptoReg::Bad" );
  assert( &flow->_defs   [OptoReg::Bad] == defs   , "Ok to index at OptoReg::Bad" );
  return flow;
}

static int get_live_bit( int *live, int reg ) {
  return live[reg>>LogBitsPerInt] &   (1<<(reg&(BitsPerInt-1))); }
static void set_live_bit( int *live, int reg ) {
         live[reg>>LogBitsPerInt] |=  (1<<(reg&(BitsPerInt-1))); }
static void clr_live_bit( int *live, int reg ) {
         live[reg>>LogBitsPerInt] &= ~(1<<(reg&(BitsPerInt-1))); }
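// The 'live' vector is an array of ints used as a flat bitset: register 'reg'
// maps to word (reg >> LogBitsPerInt), bit (reg & (BitsPerInt-1)); e.g. with
// 32-bit ints, register 37 is bit 5 of word 1.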

// Build an oopmap from the current flow info
OopMap *OopFlow::build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, int* live ) {
  int framesize = regalloc->_framesize;
  int max_inarg_slot = OptoReg::reg2stack(regalloc->_matcher._new_SP);
  debug_only( char *dup_check = NEW_RESOURCE_ARRAY(char,OptoReg::stack0());
              memset(dup_check,0,OptoReg::stack0()) );

  OopMap *omap = new OopMap( framesize,  max_inarg_slot );
  MachCallNode *mcall = n->is_MachCall() ? n->as_MachCall() : nullptr;
  JVMState* jvms = n->jvms();

  // For all registers do...
  for( int reg=0; reg<max_reg; reg++ ) {
    if( get_live_bit(live,reg) == 0 )
      continue;                 // Ignore if not live

    // %%% C2 can use 2 OptoRegs when the physical register is only one 64-bit
    // register; in that case we'll get a non-concrete register for the second
    // half. We only need to tell the map the register once!
    //
    // However, for the moment we disable this change and leave things as they
    // were.

    VMReg r = OptoReg::as_VMReg(OptoReg::Name(reg), framesize, max_inarg_slot);

    // See if dead (no reaching def).
    Node *def = _defs[reg];     // Get reaching def
    assert( def, "since live better have reaching def" );

    if (def->is_MachTemp()) {
      assert(!def->bottom_type()->isa_oop_ptr(),
             "ADLC only assigns OOP types to MachTemp defs corresponding to xRegN operands");
      // Exclude MachTemp definitions even if they are typed as oops.
      continue;
    }

    // Classify the reaching def as oop, derived, callee-save, dead, or other
    const Type *t = def->bottom_type();
    if( t->isa_oop_ptr() ) {    // Oop or derived?
      assert( !OptoReg::is_valid(_callees[reg]), "oop can't be callee save" );
#ifdef _LP64
      // 64-bit pointers record oop-ishness on 2 aligned adjacent registers.
      // Make sure both are recorded from the same reaching def, but do not
      // put both into the oopmap.
      if( (reg&1) == 1 ) {      // High half of oop-pair?
        assert( _defs[reg-1] == _defs[reg], "both halves from same reaching def" );
        continue;               // Do not record high parts in oopmap
      }
#endif

      // Check for a legal reg name in the oopMap and bail out if it is not.
      if (!omap->legal_vm_reg_name(r)) {
        stringStream ss;
        ss.print("illegal oopMap register name: ");
        r->print_on(&ss);
        assert(false, "%s", ss.as_string());
        regalloc->C->record_method_not_compilable(ss.as_string());
        continue;
      }
      if (t->is_ptr()->offset() == 0) { // Not derived?
        if( mcall ) {
          // Outgoing argument GC mask responsibility belongs to the callee,
          // not the caller.  Inspect the inputs to the call, to see if
          // this live-range is one of them.
          uint cnt = mcall->tf()->domain_cc()->cnt();
          uint j;
          for( j = TypeFunc::Parms; j < cnt; j++)
            if( mcall->in(j) == def )
              break;            // reaching def is an argument oop
          if( j < cnt )         // arg oops don't go in GC map
            continue;           // Continue on to the next register
        }
        omap->set_oop(r);
      } else {                  // Else it's derived.
        // Find the base of the derived value.
        uint i;
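        // Safepoints record derived/base pairs as adjacent inputs starting at
        // jvms->oopoff(): in(i) is a derived value and in(i+1) is its base.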
        // Fast, common case, scan
        for( i = jvms->oopoff(); i < n->req(); i+=2 )
          if( n->in(i) == def ) break; // Common case
        if( i == n->req() ) {   // Missed, try a more generous scan
          // Scan again, but this time peek through copies
          for( i = jvms->oopoff(); i < n->req(); i+=2 ) {
            Node *m = n->in(i); // Get initial derived value
            while( 1 ) {
              Node *d = def;    // Get initial reaching def
              while( 1 ) {      // Follow copies of reaching def to end
                if( m == d ) goto found; // breaks 3 loops
                int idx = d->is_Copy();
                if( !idx ) break;
                d = d->in(idx);     // Link through copy
              }
              int idx = m->is_Copy();
              if( !idx ) break;
              m = m->in(idx);
            }
          }
          guarantee( 0, "must find derived/base pair" );
        }
      found: ;
        Node *base = n->in(i+1); // Base is other half of pair
        int breg = regalloc->get_reg_first(base);
        VMReg b = OptoReg::as_VMReg(OptoReg::Name(breg), framesize, max_inarg_slot);

        // I record liveness at safepoints BEFORE I make the inputs
        // live.  This is because argument oops are NOT live at a
        // safepoint (or at least they cannot appear in the oopmap).
        // Thus bases of base/derived pairs might not be in the
        // liveness data but they need to appear in the oopmap.
        if( get_live_bit(live,breg) == 0 ) {// Not live?
          // Flag it, so next derived pointer won't re-insert into oopmap
          set_live_bit(live,breg);
          // Already missed our turn?
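          // The outer loop scans registers in ascending order, so if the
          // base's register number is below the current one it has already
          // been passed over and must be added to the map here.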
          if( breg < reg ) {
            omap->set_oop(b);
          }
        }
        omap->set_derived_oop(r, b);
      }

    } else if( t->isa_narrowoop() ) {
      assert( !OptoReg::is_valid(_callees[reg]), "oop can't be callee save" );
      // Check for a legal reg name in the oopMap and bail out if it is not.
      if (!omap->legal_vm_reg_name(r)) {
        stringStream ss;
        ss.print("illegal oopMap register name: ");
        r->print_on(&ss);
        assert(false, "%s", ss.as_string());
        regalloc->C->record_method_not_compilable(ss.as_string());
        continue;
      }
      if( mcall ) {
        // Outgoing argument GC mask responsibility belongs to the callee,
        // not the caller.  Inspect the inputs to the call, to see if
        // this live-range is one of them.
        uint cnt = mcall->tf()->domain_cc()->cnt();
        uint j;
        for( j = TypeFunc::Parms; j < cnt; j++)
          if( mcall->in(j) == def )
            break;            // reaching def is an argument oop
        if( j < cnt )         // arg oops don't go in GC map
          continue;           // Continue on to the next register
      }
      omap->set_narrowoop(r);
    } else if( OptoReg::is_valid(_callees[reg])) { // callee-save?
      // It's a callee-save value
      assert( dup_check[_callees[reg]]==0, "trying to callee save same reg twice" );
      debug_only( dup_check[_callees[reg]]=1; )
      VMReg callee = OptoReg::as_VMReg(OptoReg::Name(_callees[reg]));
      omap->set_callee_saved(r, callee);

    } else {
      // Other - some reaching non-oop value
#ifdef ASSERT
      if (t->isa_rawptr()) {
        ResourceMark rm;
        Unique_Node_List worklist;
        worklist.push(def);
        for (uint i = 0; i < worklist.size(); i++) {
          Node* m = worklist.at(i);
          if (C->cfg()->_raw_oops.member(m)) {
            def->dump();
            m->dump();
            n->dump();
            assert(false, "there should be an oop in OopMap instead of a live raw oop at safepoint");
          }
          // Check users as well because def might be spilled
          for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
            Node* u = m->fast_out(j);
            if ((u->is_SpillCopy() && u->in(1) == m) || u->is_Phi()) {
              worklist.push(u);
            }
          }
        }
      }
#endif
    }

  }

#ifdef ASSERT
  /* Nice, Intel-only assert
  int cnt_callee_saves=0;
  int reg2 = 0;
  while (OptoReg::is_reg(reg2)) {
    if( dup_check[reg2] != 0) cnt_callee_saves++;
    assert( cnt_callee_saves==3 || cnt_callee_saves==5, "missed some callee-save" );
    reg2++;
  }
  */
#endif


#ifdef ASSERT
  bool has_derived_oops = false;
  for( OopMapStream oms1(omap); !oms1.is_done(); oms1.next()) {
    OopMapValue omv1 = oms1.current();
    if (omv1.type() != OopMapValue::derived_oop_value) {
      continue;
    }
    has_derived_oops = true;
    bool found = false;
    for( OopMapStream oms2(omap); !oms2.is_done(); oms2.next()) {
      OopMapValue omv2 = oms2.current();
      if (omv2.type() != OopMapValue::oop_value) {
        continue;
      }
      if( omv1.content_reg() == omv2.reg() ) {
        found = true;
        break;
      }
    }
    assert(has_derived_oops == omap->has_derived_oops(), "");
    assert( found, "derived with no base in oopmap" );
  }

  int num_oops = 0;
  for (OopMapStream oms2(omap); !oms2.is_done(); oms2.next()) {
    OopMapValue omv = oms2.current();
    if (omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value) {
      num_oops++;
    }
  }
  assert(num_oops == omap->num_oops(), "num_oops: %d omap->num_oops(): %d", num_oops, omap->num_oops());
#endif

  return omap;
}


// Compute backwards liveness on registers
static void do_liveness(PhaseRegAlloc* regalloc, PhaseCFG* cfg, Block_List* worklist, int max_reg_ints, Arena* A, Dict* safehash) {
  int* live = NEW_ARENA_ARRAY(A, int, (cfg->number_of_blocks() + 1) * max_reg_ints);
  int* tmp_live = &live[cfg->number_of_blocks() * max_reg_ints];
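  // 'live' holds one bitset of max_reg_ints words per block, indexed by the
  // block's _pre_order number; the extra slot at the end is the tmp_live
  // scratch set used while sweeping a block.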
  Node* root = cfg->get_root_node();
  // On CISC platforms, get the node representing the stack pointer that regalloc
  // used for spills
  Node *fp = NodeSentinel;
  if (UseCISCSpill && root->req() > 1) {
    fp = root->in(1)->in(TypeFunc::FramePtr);
  }
  memset(live, 0, cfg->number_of_blocks() * (max_reg_ints << LogBytesPerInt));
  // Push preds onto worklist
  for (uint i = 1; i < root->req(); i++) {
    Block* block = cfg->get_block_for_node(root->in(i));
    worklist->push(block);
  }

  // ZKM.jar includes tiny infinite loops which are unreached from below.
  // If we missed any blocks, we'll retry here after pushing all missed
  // blocks on the worklist.  Normally this outer loop never trips more
  // than once.
  while (1) {

    while( worklist->size() ) { // Standard worklist algorithm
      Block *b = worklist->rpop();

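      // Live-out of 'b' is the union of the live-in sets of its successors;
      // build that union in tmp_live, then sweep backwards through the block.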
      // Copy first successor into my tmp_live space
      int s0num = b->_succs[0]->_pre_order;
      int *t = &live[s0num*max_reg_ints];
      for( int i=0; i<max_reg_ints; i++ )
        tmp_live[i] = t[i];

      // OR in the remaining live registers
      for( uint j=1; j<b->_num_succs; j++ ) {
        uint sjnum = b->_succs[j]->_pre_order;
        int *t = &live[sjnum*max_reg_ints];
        for( int i=0; i<max_reg_ints; i++ )
          tmp_live[i] |= t[i];
      }

      // Now walk tmp_live up the block backwards, computing live
      for( int k=b->number_of_nodes()-1; k>=0; k-- ) {
        Node *n = b->get_node(k);
        // KILL def'd bits
        int first = regalloc->get_reg_first(n);
        int second = regalloc->get_reg_second(n);
        if( OptoReg::is_valid(first) ) clr_live_bit(tmp_live,first);
        if( OptoReg::is_valid(second) ) clr_live_bit(tmp_live,second);

        MachNode *m = n->is_Mach() ? n->as_Mach() : nullptr;

        // Check if m is potentially a CISC alternate instruction (i.e., possibly
        // synthesized by RegAlloc from a conventional instruction and a
        // spilled input)
        bool is_cisc_alternate = false;
        if (UseCISCSpill && m) {
          is_cisc_alternate = m->is_cisc_alternate();
        }

        // GEN use'd bits
        for( uint l=1; l<n->req(); l++ ) {
          Node *def = n->in(l);
          assert(def != nullptr, "input edge required");
          int first = regalloc->get_reg_first(def);
          int second = regalloc->get_reg_second(def);
          // If the peephole pass removed the node, do not set the live bit for it.
          if (!(def->is_Mach() && def->as_Mach()->get_removed())) {
            if (OptoReg::is_valid(first)) set_live_bit(tmp_live,first);
            if (OptoReg::is_valid(second)) set_live_bit(tmp_live,second);
          }
          // If we use the stack pointer in a cisc-alternative instruction,
          // check for use as a memory operand.  Then reconstruct the RegName
          // for this stack location, and set the appropriate bit in the
          // live vector (bug 4987749).
          if (is_cisc_alternate && def == fp) {
            const TypePtr *adr_type = nullptr;
            intptr_t offset;
            const Node* base = m->get_base_and_disp(offset, adr_type);
            if (base == NodeSentinel) {
              // MachNode has multiple memory inputs. We are unable to reason
              // with these, but are presuming (with trepidation) that none of
              // them are oops. This can be fixed by making get_base_and_disp()
              // look at a specific input instead of all inputs.
              assert(!def->bottom_type()->isa_oop_ptr(), "expecting non-oop mem input");
            } else if (base != fp || offset == Type::OffsetBot) {
              // Do nothing: the fp operand is either not from a memory use
              // (base == nullptr) OR the fp is used in a non-memory context
              // (base is some other register) OR the offset is not constant,
              // so it is not a stack slot.
            } else {
              assert(offset >= 0, "unexpected negative offset");
              offset -= (offset % jintSize);  // count the whole word
              int stack_reg = regalloc->offset2reg(offset);
              if (OptoReg::is_stack(stack_reg)) {
                set_live_bit(tmp_live, stack_reg);
              } else {
                assert(false, "stack_reg not on stack?");
              }
            }
          }
        }

        if( n->jvms() ) {       // Record liveness at safepoint

          // The placement of this stanza means inputs to calls are
          // considered live at the callsite's OopMap.  Argument oops are
          // hence live, but NOT included in the oopmap.  See cutout in
          // build_oop_map.  Debug oops are live (and in OopMap).
          int *n_live = NEW_ARENA_ARRAY(A, int, max_reg_ints);
          for( int l=0; l<max_reg_ints; l++ )
            n_live[l] = tmp_live[l];
          safehash->Insert(n,n_live);
        }

      }

      // Now at block top, see if we have any changes.  If so, propagate
      // to prior blocks.
      int *old_live = &live[b->_pre_order*max_reg_ints];
      int l;
      for( l=0; l<max_reg_ints; l++ )
        if( tmp_live[l] != old_live[l] )
          break;
      if( l<max_reg_ints ) {     // Change!
        // Copy in new value
        for( l=0; l<max_reg_ints; l++ )
          old_live[l] = tmp_live[l];
        // Push preds onto worklist
        for (l = 1; l < (int)b->num_preds(); l++) {
          Block* block = cfg->get_block_for_node(b->pred(l));
          worklist->push(block);
        }
      }
    }

    // Scan for any missing safepoints.  Happens with infinite loops
    // a la ZKM.jar
    uint i;
    for (i = 1; i < cfg->number_of_blocks(); i++) {
      Block* block = cfg->get_block(i);
      uint j;
      for (j = 1; j < block->number_of_nodes(); j++) {
        if (block->get_node(j)->jvms() && (*safehash)[block->get_node(j)] == nullptr) {
          break;
        }
      }
      if (j < block->number_of_nodes()) {
        break;
      }
    }
    if (i == cfg->number_of_blocks()) {
      break;                    // Got 'em all
    }

    if (PrintOpto && Verbose) {
      tty->print_cr("retripping live calc");
    }

    // Force the issue (expensively): recheck everybody
    for (i = 1; i < cfg->number_of_blocks(); i++) {
      worklist->push(cfg->get_block(i));
    }
  }
}

// Collect GC mask info - where are all the OOPs?
void PhaseOutput::BuildOopMaps() {
  Compile::TracePhase tp("bldOopMaps", &timers[_t_buildOopMaps]);
  // Can't resource-mark because I need to leave all those OopMaps around,
  // or else I need to resource-mark some arena other than the default.
  // ResourceMark rm;              // Reclaim all OopFlows when done
  int max_reg = C->regalloc()->_max_reg; // Current array extent

  Arena *A = Thread::current()->resource_area();
  Block_List worklist;          // Worklist of pending blocks

  int max_reg_ints = align_up(max_reg, BitsPerInt)>>LogBitsPerInt;
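  // Round the register count up to whole 32-bit words; e.g. with BitsPerInt
  // == 32, max_reg = 200 rounds up to 224 bits, i.e. 7 ints per per-block
  // liveness set.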
  Dict *safehash = nullptr;        // Per-safepoint liveness info
  // Compute a backwards liveness per register.  Needs a bitarray of
  // #blocks x (#registers, rounded up to ints)
  safehash = new Dict(cmpkey,hashkey,A);
  do_liveness( C->regalloc(), C->cfg(), &worklist, max_reg_ints, A, safehash );
  OopFlow *free_list = nullptr;    // Free, unused

  // Array mapping blocks to completed oopflows
  OopFlow **flows = NEW_ARENA_ARRAY(A, OopFlow*, C->cfg()->number_of_blocks());
  memset( flows, 0, C->cfg()->number_of_blocks() * sizeof(OopFlow*) );


  // Do the first block 'by hand' to prime the worklist
  Block *entry = C->cfg()->get_block(1);
  OopFlow *rootflow = OopFlow::make(A,max_reg,C);
  // Initialize to 'bottom' (not 'top')
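  // 'bottom' here means no information is assumed on entry: no callee-save
  // registers and no reaching defs; merging can only move entries toward
  // 'bottom', never invent new facts.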
  memset( rootflow->_callees, OptoReg::Bad, max_reg*sizeof(short) );
  memset( rootflow->_defs   ,            0, max_reg*sizeof(Node*) );
  flows[entry->_pre_order] = rootflow;

  // Do the first block 'by hand' to prime the worklist
  rootflow->_b = entry;
  rootflow->compute_reach( C->regalloc(), max_reg, safehash );
  for( uint i=0; i<entry->_num_succs; i++ )
    worklist.push(entry->_succs[i]);

  // Now worklist contains blocks which have some, but perhaps not all,
  // predecessors visited.
  while( worklist.size() ) {
    // Scan for a block with all predecessors visited, or any random slob
    // otherwise.  All-preds-visited order allows me to recycle OopFlow
    // structures rapidly and cut down on the memory footprint.
    // Note: not all predecessors might be visited yet (must happen for
    // irreducible loops).  This is OK, since every live value must have the
    // SAME reaching def for the block, so any reaching def is OK.
    uint i;

    Block *b = worklist.pop();
    // Ignore root block
    if (b == C->cfg()->get_root_block()) {
      continue;
    }
    // Block is already done?  Happens if a block has several predecessors;
    // it can get on the worklist more than once.
    if( flows[b->_pre_order] ) continue;

    // If this block has a visited predecessor AND that predecessor has this
    // block as its only undone child, we can move the OopFlow from the
    // pred to this block.  Otherwise we have to grab a new OopFlow.
    OopFlow *flow = nullptr;       // Flag for finding optimized flow
    Block *pred = (Block*)((intptr_t)0xdeadbeef);
    // Scan this block's preds to find a done predecessor
    for (uint j = 1; j < b->num_preds(); j++) {
      Block* p = C->cfg()->get_block_for_node(b->pred(j));
      OopFlow *p_flow = flows[p->_pre_order];
      if( p_flow ) {            // Predecessor is done
        assert( p_flow->_b == p, "cross check" );
        pred = p;               // Record some predecessor
        // If all successors of p are done except for 'b', then we can carry
        // p_flow forward to 'b' without copying, otherwise we have to draw
        // from the free_list and clone data.
        uint k;
        for( k=0; k<p->_num_succs; k++ )
          if( !flows[p->_succs[k]->_pre_order] &&
              p->_succs[k] != b )
            break;

        // Either carry-forward the now-unused OopFlow for b's use
        // or draw a new one from the free list
        if( k==p->_num_succs ) {
          flow = p_flow;
          break;                // Found an ideal pred, use him
        }
      }
    }

    if( flow ) {
      // We have an OopFlow that's the last-use of a predecessor.
      // Carry it forward.
    } else {                    // Draw a new OopFlow from the freelist
      if( !free_list )
        free_list = OopFlow::make(A,max_reg,C);
      flow = free_list;
      assert( flow->_b == nullptr, "oopFlow is not free" );
      free_list = flow->_next;
      flow->_next = nullptr;

      // Copy/clone over the data
      flow->clone(flows[pred->_pre_order], max_reg);
    }

    // Mark flow for block.  Blocks can only be flowed over once,
    // because after the first time they are guarded from entering
    // this code again.
    assert( flow->_b == pred, "have some prior flow" );
    flow->_b = nullptr;

    // Now push flow forward
    flows[b->_pre_order] = flow;// Mark flow for this block
    flow->_b = b;
    flow->compute_reach( C->regalloc(), max_reg, safehash );

    // Now push children onto worklist
    for( i=0; i<b->_num_succs; i++ )
      worklist.push(b->_succs[i]);

  }
}