1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "libadt/vectset.hpp"
26 #include "memory/allocation.inline.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "opto/block.hpp"
29 #include "opto/c2compiler.hpp"
30 #include "opto/callnode.hpp"
31 #include "opto/cfgnode.hpp"
32 #include "opto/chaitin.hpp"
33 #include "opto/machnode.hpp"
34 #include "opto/opcodes.hpp"
35 #include "opto/phaseX.hpp"
36 #include "opto/rootnode.hpp"
37 #include "opto/runtime.hpp"
38 #include "runtime/deoptimization.hpp"
39
40 // Portions of code courtesy of Clifford Click
41
42 // Optimization - Graph Style
43
44 // To avoid float value underflow
45 #define MIN_BLOCK_FREQUENCY 1.e-35f
46
47 //----------------------------schedule_node_into_block-------------------------
48 // Insert node n into block b. Look for projections of n and make sure they
49 // are in b also.
50 void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
51 // Set the basic block of n and add n to b.
52 map_node_to_block(n, b);
53 b->add_inst(n);
54
55 // After Matching, nearly any old Node may have projections trailing it.
56 // These are usually machine-dependent flags. In any case, they might
57 // float to another block below this one. Move them up.
58 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
59 Node* use = n->fast_out(i);
60 if (use->is_Proj()) {
61 Block* buse = get_block_for_node(use);
62 if (buse != b) { // In wrong block?
63 if (buse != nullptr) {
64 buse->find_remove(use); // Remove from wrong block
65 }
66 map_node_to_block(use, b);
67 b->add_inst(use);
68 }
69 }
70 }
71 }
72
73 //----------------------------replace_block_proj_ctrl-------------------------
74 // Nodes that have is_block_proj() nodes as their control need to use
75 // the appropriate Region for their actual block as their control since
76 // the projection will be in a predecessor block.
77 void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
78 const Node *in0 = n->in(0);
79 assert(in0 != nullptr, "Only control-dependent");
80 const Node *p = in0->is_block_proj();
81 if (p != nullptr && p != n) { // Control from a block projection?
82 assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
83 // Find trailing Region
84 Block *pb = get_block_for_node(in0); // Block-projection already has basic block
85 uint j = 0;
86 if (pb->_num_succs != 1) { // More than 1 successor?
87 // Search for successor
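// The projections that define the block's successors are kept as the last _num_succs nodes of the
// block, in the same order as the entries of pb->_succs.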
88 uint max = pb->number_of_nodes();
89 assert( max > 1, "" );
90 uint start = max - pb->_num_succs;
91 // Find which output path belongs to projection
92 for (j = start; j < max; j++) {
93 if( pb->get_node(j) == in0 )
94 break;
95 }
96 assert( j < max, "must find" );
97 // Change control to match head of successor basic block
98 j -= start;
99 }
100 n->set_req(0, pb->_succs[j]->head());
101 }
102 }
103
104 bool PhaseCFG::is_dominator(Node* dom_node, Node* node) {
105 assert(is_CFG(node) && is_CFG(dom_node), "node and dom_node must be CFG nodes");
106 if (dom_node == node) {
107 return true;
108 }
109 Block* d = find_block_for_node(dom_node);
110 Block* n = find_block_for_node(node);
111 assert(n != nullptr && d != nullptr, "blocks must exist");
112
113 if (d == n) {
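// Both nodes live in the same block. A block-start node dominates everything else in the block, a
// block projection at the end of the block dominates nothing else, and two interior control nodes
// (control projections or safepoints) are ordered by walking their control inputs below.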
114 if (dom_node->is_block_start()) {
115 return true;
116 }
117 if (node->is_block_start()) {
118 return false;
119 }
120 if (dom_node->is_block_proj()) {
121 return false;
122 }
123 if (node->is_block_proj()) {
124 return true;
125 }
126
127 assert(is_control_proj_or_safepoint(node), "node must be control projection or safepoint");
128 assert(is_control_proj_or_safepoint(dom_node), "dom_node must be control projection or safepoint");
129
130 // Neither 'node' nor 'dom_node' is a block start or block projection.
131 // Check if 'dom_node' is above 'node' in the control graph.
132 if (is_dominating_control(dom_node, node)) {
133 return true;
134 }
135
136 #ifdef ASSERT
137 // If 'dom_node' does not dominate 'node' then 'node' has to dominate 'dom_node'
138 if (!is_dominating_control(node, dom_node)) {
139 node->dump();
140 dom_node->dump();
141 assert(false, "neither dom_node nor node dominates the other");
142 }
143 #endif
144
145 return false;
146 }
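// Nodes in different blocks: dom_node dominates node iff dom_node's block is an ancestor of node's
// block in the dominator tree, i.e., iff the LCA of the two blocks is dom_node's block.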
147 return d->dom_lca(n) == d;
148 }
149
150 bool PhaseCFG::is_CFG(Node* n) {
151 return n->is_block_proj() || n->is_block_start() || is_control_proj_or_safepoint(n);
152 }
153
154 bool PhaseCFG::is_control_proj_or_safepoint(Node* n) const {
155 bool result = (n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_SafePoint) || (n->is_Proj() && n->as_Proj()->bottom_type() == Type::CONTROL);
156 assert(!result || (n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_SafePoint)
157 || (n->is_Proj() && n->as_Proj()->_con == 0), "If control projection, it must be projection 0");
158 return result;
159 }
160
161 Block* PhaseCFG::find_block_for_node(Node* n) const {
162 if (n->is_block_start() || n->is_block_proj()) {
163 return get_block_for_node(n);
164 } else {
165 // Walk up the control graph if 'n' is neither a block start nor a block projection. In this case 'n' must be
166 // an unmatched control projection or a not-yet-matched safepoint precedence edge in the middle of a block.
167 assert(is_control_proj_or_safepoint(n), "must be control projection or safepoint");
168 Node* ctrl = n->in(0);
169 while (!ctrl->is_block_start()) {
170 ctrl = ctrl->in(0);
171 }
172 return get_block_for_node(ctrl);
173 }
174 }
175
176 // Walk up the control graph from 'n' and check if 'dom_ctrl' is found.
177 bool PhaseCFG::is_dominating_control(Node* dom_ctrl, Node* n) {
178 Node* ctrl = n->in(0);
179 while (!ctrl->is_block_start()) {
180 if (ctrl == dom_ctrl) {
181 return true;
182 }
183 ctrl = ctrl->in(0);
184 }
185 return false;
186 }
187
188
189 //------------------------------schedule_pinned_nodes--------------------------
190 // Set the basic block for Nodes pinned into blocks
191 void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
192 // Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
193 GrowableArray <Node*> spstack(C->live_nodes() + 8);
194 spstack.push(_root);
195 while (spstack.is_nonempty()) {
196 Node* node = spstack.pop();
197 if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
198 if (node->pinned() && !has_block(node)) { // Pinned? Nail it down!
199 assert(node->in(0), "pinned Node must have Control");
200 // Before setting block replace block_proj control edge
201 replace_block_proj_ctrl(node);
202 Node* input = node->in(0);
203 while (!input->is_block_start()) {
204 input = input->in(0);
205 }
206 Block* block = get_block_for_node(input); // Basic block of controlling input
207 schedule_node_into_block(node, block);
208 }
209
210 // If the node has precedence edges (added when CastPP nodes are
211 // removed in final_graph_reshaping), fix the control of the
212 // node to cover the precedence edges and remove the
213 // dependencies.
214 Node* n = nullptr;
215 for (uint i = node->len()-1; i >= node->req(); i--) {
216 Node* m = node->in(i);
217 if (m == nullptr) continue;
218 assert(is_CFG(m), "must be a CFG node");
219 node->rm_prec(i);
220 if (n == nullptr) {
221 n = m;
222 } else {
223 assert(is_dominator(n, m) || is_dominator(m, n), "one must dominate the other");
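// Keep the deeper of the two CFG nodes: it is dominated by the other, so using it as control also
// satisfies the shallower dependency.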
224 n = is_dominator(n, m) ? m : n;
225 }
226 }
227 if (n != nullptr) {
228 assert(node->in(0), "control should have been set");
229 assert(is_dominator(n, node->in(0)) || is_dominator(node->in(0), n), "one must dominate the other");
230 if (!is_dominator(n, node->in(0))) {
231 node->set_req(0, n);
232 }
233 }
234
235 // Process all inputs that are non-null
236 for (int i = node->len()-1; i >= 0; --i) {
237 if (node->in(i) != nullptr) {
238 spstack.push(node->in(i));
239 }
240 }
241 }
242 }
243 }
244
245 // Assert that the new input b2 is dominated by all previous inputs.
246 // Check this by seeing that it is dominated by b1, the deepest
247 // input observed before b2.
248 static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
249 if (b1 == nullptr) return;
250 assert(b1->_dom_depth < b2->_dom_depth, "sanity");
251 Block* tmp = b2;
252 while (tmp != b1 && tmp != nullptr) {
253 tmp = tmp->_idom;
254 }
255 if (tmp != b1) {
256 #ifdef ASSERT
257 // Detected an unschedulable graph. Print some nice stuff and die.
258 tty->print_cr("!!! Unschedulable graph !!!");
259 for (uint j=0; j<n->len(); j++) { // For all inputs
260 Node* inn = n->in(j); // Get input
261 if (inn == nullptr) continue; // Ignore null, missing inputs
262 Block* inb = cfg->get_block_for_node(inn);
263 tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
264 inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
265 inn->dump();
266 }
267 tty->print("Failing node: ");
268 n->dump();
269 assert(false, "unschedulable graph");
270 #endif
271 cfg->C->record_failure("unschedulable graph");
272 }
273 }
274
275 static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
276 // Find the last input dominated by all other inputs.
277 Block* deepb = nullptr; // Deepest block so far
278 int deepb_dom_depth = 0;
279 for (uint k = 0; k < n->len(); k++) { // For all inputs
280 Node* inn = n->in(k); // Get input
281 if (inn == nullptr) continue; // Ignore null, missing inputs
282 Block* inb = cfg->get_block_for_node(inn);
283 assert(inb != nullptr, "must already have scheduled this input");
284 if (deepb_dom_depth < (int) inb->_dom_depth) {
285 // The new inb must be dominated by the previous deepb.
286 // The various inputs must be linearly ordered in the dom
287 // tree, or else there will not be a unique deepest block.
288 assert_dom(deepb, inb, n, cfg);
289 if (cfg->C->failing()) {
290 return nullptr;
291 }
292 deepb = inb; // Save deepest block
293 deepb_dom_depth = deepb->_dom_depth;
294 }
295 }
296 assert(deepb != nullptr, "must be at least one input to n");
297 return deepb;
298 }
299
300
301 //------------------------------schedule_early---------------------------------
302 // Find the earliest Block any instruction can be placed in. Some instructions
303 // are pinned into Blocks. Unpinned instructions can appear in the last block in
304 // which all their inputs occur.
305 bool PhaseCFG::schedule_early(VectorSet &visited, Node_Stack &roots) {
306 // Allocate stack with enough space to avoid frequent realloc
307 Node_Stack nstack(roots.size() + 8);
308 // _root will be processed among C->top() inputs
309 roots.push(C->top(), 0);
310 visited.set(C->top()->_idx);
311
312 while (roots.size() != 0) {
313 // Use local variables parent_node and input_index to cache the values
314 // at the stack's top.
315 Node* parent_node = roots.node();
316 uint input_index = 0;
317 roots.pop();
318
319 while (true) {
320 if (input_index == 0) {
321 // Fix up some control. Constants without control get attached
322 // to root, and nodes that use is_block_proj() nodes should be attached
323 // to the region that starts their block.
324 const Node* control_input = parent_node->in(0);
325 if (control_input != nullptr) {
326 replace_block_proj_ctrl(parent_node);
327 } else {
328 // Is a constant with NO inputs?
329 if (parent_node->req() == 1) {
330 parent_node->set_req(0, _root);
331 }
332 }
333 }
334
335 // First, visit all inputs and force them to get a block. If an
336 // input is already in a block we quit following inputs (to avoid
337 // cycles). Instead we put that Node on a worklist to be handled
338 // later (since ITS inputs may not have a block yet).
339
340 // Assume all n's inputs will be processed
341 bool done = true;
342
343 while (input_index < parent_node->len()) {
344 Node* in = parent_node->in(input_index++);
345 if (in == nullptr) {
346 continue;
347 }
348
349 int is_visited = visited.test_set(in->_idx);
350 if (!has_block(in)) {
351 if (is_visited) {
352 assert(false, "graph should be schedulable");
353 return false;
354 }
355 // Save parent node and next input's index.
356 nstack.push(parent_node, input_index);
357 // Process current input now.
358 parent_node = in;
359 input_index = 0;
360 // Not all n's inputs processed.
361 done = false;
362 break;
363 } else if (!is_visited) {
364 // Visit this guy later, using worklist
365 roots.push(in, 0);
366 }
367 }
368
369 if (done) {
370 // All of n's inputs have been processed, complete post-processing.
371
372 // Some instructions are pinned into a block. These include Region,
373 // Phi, Start, Return, and other control-dependent instructions and
374 // any projections which depend on them.
375 if (!parent_node->pinned()) {
376 // Set earliest legal block.
377 Block* earliest_block = find_deepest_input(parent_node, this);
378 if (C->failing()) {
379 return false;
380 }
381 map_node_to_block(parent_node, earliest_block);
382 } else {
383 assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
384 }
385
386 if (nstack.is_empty()) {
387 // Finished all nodes on stack.
388 // Process next node on the worklist 'roots'.
389 break;
390 }
391 // Get saved parent node and next input's index.
392 parent_node = nstack.node();
393 input_index = nstack.index();
394 nstack.pop();
395 }
396 }
397 }
398 return true;
399 }
400
401 //------------------------------dom_lca----------------------------------------
402 // Find least common ancestor in dominator tree
403 // LCA is a current notion of LCA, to be raised above 'this'.
404 // As a convenient boundary condition, return 'this' if LCA is null.
405 // Find the LCA of those two nodes.
406 Block* Block::dom_lca(Block* LCA) {
407 if (LCA == nullptr || LCA == this) return this;
408
409 Block* anc = this;
410 while (anc->_dom_depth > LCA->_dom_depth)
411 anc = anc->_idom; // Walk up till anc is as high as LCA
412
413 while (LCA->_dom_depth > anc->_dom_depth)
414 LCA = LCA->_idom; // Walk up till LCA is as high as anc
415
416 while (LCA != anc) { // Walk both up till they are the same
417 LCA = LCA->_idom;
418 anc = anc->_idom;
419 }
420
421 return LCA;
422 }
423
424 //--------------------------raise_LCA_above_use--------------------------------
425 // We are placing a definition, and have been given a def->use edge.
426 // The definition must dominate the use, so move the LCA upward in the
427 // dominator tree to dominate the use. If the use is a phi, adjust
428 // the LCA only with the phi input paths which actually use this def.
429 static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
430 Block* buse = cfg->get_block_for_node(use);
431 if (buse == nullptr) return LCA; // Unused killing Projs have no use block
432 if (!use->is_Phi()) return buse->dom_lca(LCA);
433 uint pmax = use->req(); // Number of Phi inputs
434 // Why doesn't this loop just break after finding the matching input to
435 // the Phi? Well... it's like this. I do not have true def-use/use-def
436 // chains. That means I cannot distinguish, from the def-use direction, which
437 // of many use-def edges lead from the same use to the same def. That is, this
438 // Phi might have several uses of the same def. Each use appears in a
439 // different predecessor block. But when I enter here, I cannot distinguish
440 // which use-def edge I should find the predecessor block for. So I find
441 // them all. That means I do a little extra work if a Phi uses the same value
442 // more than once.
443 for (uint j=1; j<pmax; j++) { // For all inputs
444 if (use->in(j) == def) { // Found matching input?
445 Block* pred = cfg->get_block_for_node(buse->pred(j));
446 LCA = pred->dom_lca(LCA);
447 }
448 }
449 return LCA;
450 }
451
452 //----------------------------raise_LCA_above_marks----------------------------
453 // Return a new LCA that dominates LCA and any of its marked predecessors.
454 // Search all my parents up to 'early' (exclusive), looking for predecessors
455 // which are marked with the given index. Return the LCA (in the dom tree)
456 // of all marked blocks. If there are none marked, return the original
457 // LCA.
458 static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
459 assert(early->dominates(LCA), "precondition failed");
460 Block_List worklist;
461 worklist.push(LCA);
462 while (worklist.size() > 0) {
463 Block* mid = worklist.pop();
464 if (mid == early) continue; // stop searching here
465
466 // Test and set the visited bit.
467 if (mid->raise_LCA_visited() == mark) continue; // already visited
468
469 // Don't process the current LCA, otherwise the search may terminate early
470 if (mid != LCA && mid->raise_LCA_mark() == mark) {
471 // Raise the LCA.
472 LCA = mid->dom_lca(LCA);
473 if (LCA == early) break; // stop searching everywhere
474 assert(early->dominates(LCA), "unsound LCA update");
475 // Resume searching at that point, skipping intermediate levels.
476 worklist.push(LCA);
477 if (LCA == mid)
478 continue; // Don't mark as visited to avoid early termination.
479 } else {
480 // Keep searching through this block's predecessors.
481 for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
482 Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
483 worklist.push(mid_parent);
484 }
485 }
486 mid->set_raise_LCA_visited(mark);
487 }
488 return LCA;
489 }
490
491 //--------------------------memory_early_block--------------------------------
492 // This is a variation of find_deepest_input, the heart of schedule_early.
493 // Find the "early" block for a load, if we considered only memory and
494 // address inputs, that is, if other data inputs were ignored.
495 //
496 // Because a subset of edges are considered, the resulting block will
497 // be earlier (at a shallower dom_depth) than the true schedule_early
498 // point of the node. We compute this earlier block as a more permissive
499 // site for anti-dependency insertion, but only if subsume_loads is enabled.
500 static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
501 Node* base;
502 Node* index;
503 Node* store = load->in(MemNode::Memory);
504 load->as_Mach()->memory_inputs(base, index);
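// memory_inputs() reports the base and index address inputs of this load; either may be null if the
// addressing mode does not use it (NodeSentinel would indicate an unrecognized shape, see the assert below).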
505
506 assert(base != NodeSentinel && index != NodeSentinel,
507 "unexpected base/index inputs");
508
509 Node* mem_inputs[4];
510 int mem_inputs_length = 0;
511 if (base != nullptr) mem_inputs[mem_inputs_length++] = base;
512 if (index != nullptr) mem_inputs[mem_inputs_length++] = index;
513 if (store != nullptr) mem_inputs[mem_inputs_length++] = store;
514
515 // In the comparison below, add one to account for the control input,
516 // which may be null, but always takes up a spot in the in array.
517 if (mem_inputs_length + 1 < (int) load->req()) {
518 // This "load" has more inputs than just the memory, base and index inputs.
519 // For purposes of checking anti-dependences, we need to start
520 // from the early block of only the address portion of the instruction,
521 // and ignore other blocks that may have factored into the wider
522 // schedule_early calculation.
523 if (load->in(0) != nullptr) mem_inputs[mem_inputs_length++] = load->in(0);
524
525 Block* deepb = nullptr; // Deepest block so far
526 int deepb_dom_depth = 0;
527 for (int i = 0; i < mem_inputs_length; i++) {
528 Block* inb = cfg->get_block_for_node(mem_inputs[i]);
529 if (deepb_dom_depth < (int) inb->_dom_depth) {
530 // The new inb must be dominated by the previous deepb.
531 // The various inputs must be linearly ordered in the dom
532 // tree, or else there will not be a unique deepest block.
533 assert_dom(deepb, inb, load, cfg);
534 if (cfg->C->failing()) {
535 return nullptr;
536 }
537 deepb = inb; // Save deepest block
538 deepb_dom_depth = deepb->_dom_depth;
539 }
540 }
541 early = deepb;
542 }
543
544 return early;
545 }
546
547 // This function is used by raise_above_anti_dependences to find unrelated loads for stores in implicit null checks.
548 bool PhaseCFG::unrelated_load_in_store_null_block(Node* store, Node* load) {
549 // We expect an anti-dependence edge from 'load' to 'store', except when
550 // implicit_null_check() has hoisted 'store' above its early block to
551 // perform an implicit null check, and 'load' is placed in the null
552 // block. In this case it is safe to ignore the anti-dependence, as the
553 // null block is only reached if 'store' tries to write to a null object and
554 // 'load' reads from a non-null object (there is a preceding check for that).
555 // These objects can't be the same.
556 Block* store_block = get_block_for_node(store);
557 Block* load_block = get_block_for_node(load);
558 Node* end = store_block->end();
559 if (end->is_MachNullCheck() && (end->in(1) == store) && store_block->dominates(load_block)) {
560 Node* if_true = end->find_out_with(Op_IfTrue);
561 assert(if_true != nullptr, "null check without null projection");
562 Node* null_block_region = if_true->find_out_with(Op_Region);
563 assert(null_block_region != nullptr, "null check without null region");
564 return get_block_for_node(null_block_region) == load_block;
565 }
566 return false;
567 }
568
569 class DefUseMemStatesQueue : public StackObj {
570 private:
571 class DefUsePair : public StackObj {
572 private:
573 Node* _def; // memory state
574 Node* _use; // use of the memory state that also modifies the memory state
575
576 public:
577 DefUsePair(Node* def, Node* use) :
578 _def(def), _use(use) {
579 }
580
581 DefUsePair() :
582 _def(nullptr), _use(nullptr) {
583 }
584
585 Node* def() const {
586 return _def;
587 }
588
589 Node* use() const {
590 return _use;
591 }
592 };
593
594 GrowableArray<DefUsePair> _queue;
595 GrowableArray<MergeMemNode*> _worklist_visited; // visited mergemem nodes
596
597 bool already_enqueued(Node* def_mem, PhiNode* use_phi) const {
598 // def_mem is one of the inputs of use_phi and at least one input of use_phi is
599 // not def_mem. It's however possible that use_phi has def_mem as input multiple
600 // times. If that happens, use_phi is recorded as a use of def_mem multiple
601 // times as well. When PhaseCFG::raise_above_anti_dependences() goes over
602 // uses of def_mem and enqueues them for processing, use_phi would then be
603 // enqueued for processing multiple times when it only needs to be
604 // processed once. The code below checks if use_phi as a use of def_mem was
605 // already enqueued to avoid redundant processing of use_phi.
606 int j = _queue.length()-1;
607 // If there are any uses of def_mem already enqueued, they were enqueued
608 // last (all uses of def_mem are processed in one go).
609 for (; j >= 0; j--) {
610 const DefUsePair& def_use_pair = _queue.at(j);
611 if (def_use_pair.def() != def_mem) {
612 // We're done with the uses of def_mem
613 break;
614 }
615 if (def_use_pair.use() == use_phi) {
616 return true;
617 }
618 }
619 #ifdef ASSERT
620 for (; j >= 0; j--) {
621 const DefUsePair& def_use_pair = _queue.at(j);
622 assert(def_use_pair.def() != def_mem, "Should be done with the uses of def_mem");
623 }
624 #endif
625 return false;
626 }
627
628 public:
629 DefUseMemStatesQueue(ResourceArea* area) {
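// The growable arrays below allocate from the current thread's resource area
// (the caller holds a ResourceMark).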
630 }
631
632 void push(Node* def_mem_state, Node* use_mem_state) {
633 if (use_mem_state->is_MergeMem()) {
634 // Be sure we don't get into combinatorial problems.
635 if (!_worklist_visited.append_if_missing(use_mem_state->as_MergeMem())) {
636 return; // already on work list; do not repeat
637 }
638 } else if (use_mem_state->is_Phi()) {
639 // A Phi could have the same memory state as input multiple times. If that's the case, we don't need to enqueue it
640 // more than once. We otherwise allow Phis to be repeated; they can merge two relevant states.
641 if (already_enqueued(def_mem_state, use_mem_state->as_Phi())) {
642 return;
643 }
644 }
645
646 _queue.push(DefUsePair(def_mem_state, use_mem_state));
647 }
648
649 bool is_nonempty() const {
650 return _queue.is_nonempty();
651 }
652
653 Node* top_def() const {
654 return _queue.top().def();
655 }
656
657 Node* top_use() const {
658 return _queue.top().use();
659 }
660
661 void pop() {
662 _queue.pop();
663 }
664 };
665
666 // Enforce a scheduling of the given 'load' that ensures anti-dependent stores
667 // do not overwrite the load's input memory state before the load executes.
668 //
669 // The given 'load' has a current scheduling range in the dominator tree that
670 // starts at the load's early block (computed in schedule_early) and ends at
671 // the given 'LCA' block for the load. However, there may still exist
672 // anti-dependent stores between the early block and the LCA that overwrite
673 // memory that the load must witness. For such stores, we must
674 //
675 // 1. raise the load's LCA to force the load to (eventually) be scheduled at
676 // latest in the store's block, and
677 // 2. if the load may get scheduled in the store's block, additionally insert
678 // an anti-dependence edge (i.e., precedence edge) from the load to the
679 // store to ensure LCM schedules the load before the store within the
680 // block.
681 //
682 // For a given store, we say that the store is on a _distinct_ control-flow
683 // path relative to the load if there are no paths from early to LCA that go
684 // through the store's block. Such stores are not anti-dependent, and there is
685 // no need to update the LCA nor to add anti-dependence edges.
686 //
687 // Due to the presence of loops, we must also raise the LCA above
688 // anti-dependent memory Phis. We defer the details (see later comments in the
689 // method) and for now look at an example without loops.
690 //
691 // CFG DOMINATOR TREE
692 //
693 // B1 (early,L) B1
694 // |\________ /\\___
695 // | \ / \ \
696 // B2 (L,S) \ B2 B7 B6
697 // / \ \ /\\___
698 // B3 B4 (S) B7 (S) / \ \
699 // \ / / B3 B4 B5
700 // B5 (LCA,L) /
701 // \ ____/
702 // \ /
703 // B6
704 //
705 // Here, the load's scheduling range when calling raise_above_anti_dependences
706 // is between early and LCA in the dominator tree, i.e., in block B1, B2, or B5
707 // (indicated with "L"). However, there are a number of stores (indicated with
708 // "S") that overwrite the memory which the load must witness. First, consider
709 // the store in B4. We cannot legally schedule the load in B4, so an
710 // anti-dependence edge is redundant. However, we must raise the LCA above
711 // B4, which means that the updated LCA is B2. Now, consider the store in B2.
712 // The LCA is already B2, so we do not need to raise it any further.
713 // If we, eventually, decide to schedule the load in B2, it could happen that
714 // LCM decides to place the load after the anti-dependent store in B2.
715 // Therefore, we now need to add an anti-dependence edge between the load and
716 // the B2 store, ensuring that the load is scheduled before the store. Finally,
717 // the store in B7 is on a distinct control-flow path. Therefore, B7 requires
718 // no action.
719 //
720 // The raise_above_anti_dependences method returns the updated LCA and ensures
721 // there are no anti-dependent stores in any block between the load's early
722 // block and the updated LCA. Any stores in the updated LCA will have new
723 // anti-dependence edges back to the load. The caller may schedule the load in
724 // the updated LCA, or it may hoist the load above the updated LCA, if the
725 // updated LCA is not the early block.
726 Block* PhaseCFG::raise_above_anti_dependences(Block* LCA, Node* load, const bool verify) {
727 ResourceMark rm;
728 assert(load->needs_anti_dependence_check(), "must be a load of some sort");
729 assert(LCA != nullptr, "");
730 DEBUG_ONLY(Block* LCA_orig = LCA);
731
732 // Compute the alias index. Loads and stores with different alias indices
733 // do not need anti-dependence edges.
734 int load_alias_idx = C->get_alias_index(load->adr_type());
735 #ifdef ASSERT
736 assert(Compile::AliasIdxTop <= load_alias_idx && load_alias_idx < C->num_alias_types(), "Invalid alias index");
737 if (load_alias_idx == Compile::AliasIdxBot && C->do_aliasing() &&
738 (PrintOpto || VerifyAliases ||
739 (PrintMiscellaneous && (WizardMode || Verbose)))) {
740 // Load nodes should not consume all of memory.
741 // Reporting a bottom type indicates a bug in adlc.
742 // If some particular type of node validly consumes all of memory,
743 // sharpen the preceding "if" to exclude it, so we can catch bugs here.
744 tty->print_cr("*** Possible Anti-Dependence Bug: Load consumes all of memory.");
745 load->dump(2);
746 if (VerifyAliases) assert(load_alias_idx != Compile::AliasIdxBot, "");
747 }
748 #endif
749
750 if (!C->alias_type(load_alias_idx)->is_rewritable()) {
751 // It is impossible to spoil this load by putting stores before it,
752 // because we know that the stores will never update the value
753 // which 'load' must witness.
754 return LCA;
755 }
756
757 node_idx_t load_index = load->_idx;
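// The load's node index serves as the mark value for the lazy raise-LCA bookkeeping on blocks below.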
758
759 // Record the earliest legal placement of 'load', as determined by the unique
760 // point in the dominator tree where all memory effects and other inputs are
761 // first available (computed by schedule_early). For normal loads, 'early' is
762 // the shallowest place (dominator-tree wise) to look for anti-dependences
763 // between this load and any store.
764 Block* early = get_block_for_node(load);
765
766 // If we are subsuming loads, compute an "early" block that only considers
767 // memory or address inputs. This block may be different from the
768 // schedule_early block when it is at an even shallower depth in the
769 // dominator tree, allowing for a broader discovery of anti-dependences.
770 if (C->subsume_loads()) {
771 early = memory_early_block(load, early, this);
772 if (C->failing()) {
773 return nullptr;
774 }
775 }
776
777 assert(early->dominates(LCA_orig), "precondition failed");
778
779 ResourceArea* area = Thread::current()->resource_area();
780
781 // Bookkeeping of possibly anti-dependent stores that we find outside the
782 // early block and that may need anti-dependence edges. Note that stores in
783 // non_early_stores are not necessarily dominated by early. The search starts
784 // from initial_mem, which can reside in a block that dominates early, and
785 // therefore, stores we find may be in blocks that are on completely distinct
786 // control-flow paths compared to early. However, in the end, only stores in
787 // blocks dominated by early matter. The reason we record more than just the
788 // relevant stores is efficiency: we lazily record all possible
789 // anti-dependent stores and add anti-dependence edges only to the relevant
790 // ones at the very end of this method when we know the final updated LCA.
791 Node_List non_early_stores(area);
792
793 // Whether we must raise the LCA after the main worklist loop below.
794 bool must_raise_LCA_above_marks = false;
795
796 // The input load uses some memory state (initial_mem).
797 Node* initial_mem = load->in(MemNode::Memory);
798 // To find anti-dependences we must look for users of the same memory state.
799 // To do this, we search the memory graph downwards from initial_mem. During
800 // this search, we encounter different types of nodes that we handle
801 // according to the following three categories:
802 //
803 // - MergeMems
804 // - Memory-state-modifying nodes (informally referred to as "stores" above
805 // and below)
806 // - Memory Phis
807 //
808 // MergeMems do not modify the memory state. Anti-dependent stores or memory
809 // Phis may, however, exist downstream of MergeMems. Therefore, we must
810 // permit the search to continue through MergeMems. Stores may raise the LCA
811 // and may potentially also require an anti-dependence edge. Memory Phis may
812 // raise the LCA but never require anti-dependence edges. See the comments
813 // throughout the worklist loop below for further details.
814 //
815 // It may be useful to think of the anti-dependence search as traversing a
816 // tree rooted at initial_mem, with internal nodes of type MergeMem and
817 // memory Phis and stores as (potentially repeated) leaves.
818
819 // We don't optimize the memory graph for pinned loads, so we may need to raise the
820 // root of our search tree through the corresponding slices of MergeMem nodes to
821 // get to the node that really creates the memory state for this slice.
822 if (load_alias_idx >= Compile::AliasIdxRaw) {
823 while (initial_mem->is_MergeMem()) {
824 MergeMemNode* mm = initial_mem->as_MergeMem();
825 Node* p = mm->memory_at(load_alias_idx);
826 if (p != mm->base_memory()) {
827 initial_mem = p;
828 } else {
829 break;
830 }
831 }
832 }
833 // To administer the search, we use a worklist consisting of (def,use)-pairs
834 // of memory states, corresponding to edges in the search tree (and edges
835 // in the memory graph). We need to keep track of search tree edges in the
836 // worklist rather than individual nodes due to memory Phis (see details
837 // below).
838 DefUseMemStatesQueue worklist(area);
839 // We start the search at initial_mem and indicate the search root with the
840 // edge (nullptr, initial_mem).
841 worklist.push(nullptr, initial_mem);
842
843 // The worklist loop
844 while (worklist.is_nonempty()) {
845 // Pop the next edge from the worklist
846 Node* def_mem_state = worklist.top_def();
847 Node* use_mem_state = worklist.top_use();
848 worklist.pop();
849
850 // We are either
851 // - at the root of the search with the edge (nullptr, initial_mem),
852 // - just past initial_mem with the edge (initial_mem, use_mem_state), or
853 // - just past a MergeMem with the edge (MergeMem, use_mem_state).
854 assert(def_mem_state == nullptr || def_mem_state == initial_mem ||
855 def_mem_state->is_MergeMem(),
856 "unexpected memory state");
857
858 const uint op = use_mem_state->Opcode();
859
860 #ifdef ASSERT
861 // CacheWB nodes are peculiar in the sense that they are both anti-dependent and produce memory.
862 // Allow them to be treated as a store.
863 bool is_cache_wb = false;
864 if (use_mem_state->is_Mach()) {
865 int ideal_op = use_mem_state->as_Mach()->ideal_Opcode();
866 is_cache_wb = (ideal_op == Op_CacheWB);
867 }
868 assert(!use_mem_state->needs_anti_dependence_check() || is_cache_wb, "no loads");
869 #endif
870
871 // If we are either at the search root or have found a MergeMem, we step
872 // past use_mem_state and populate the search worklist with edges
873 // (use_mem_state, child) for use_mem_state's children.
874 if (def_mem_state == nullptr // root (exclusive) of tree we are searching
875 || op == Op_MergeMem // internal node of tree we are searching
876 ) {
877 def_mem_state = use_mem_state;
878
879 for (DUIterator_Fast imax, i = def_mem_state->fast_outs(imax); i < imax; i++) {
880 use_mem_state = def_mem_state->fast_out(i);
881 if (use_mem_state->needs_anti_dependence_check()) {
882 // use_mem_state is also a kind of load (i.e.,
883 // needs_anti_dependence_check), and it is not a store nor a memory
884 // Phi. Hence, it is not anti-dependent on the load.
885 continue;
886 }
887 worklist.push(def_mem_state, use_mem_state);
888 }
889 // Nothing more to do for the current (nullptr, initial_mem) or
890 // (initial_mem/MergeMem, MergeMem) edge, move on.
891 continue;
892 }
893
894 assert(!use_mem_state->is_MergeMem(),
895 "use_mem_state should be either a store or a memory Phi");
896
897 if (op == Op_MachProj || op == Op_Catch) continue;
898
899 // Compute the alias index. If the use_mem_state has an alias index
900 // different from the load's, it is not anti-dependent. Wide MemBars
901 // are anti-dependent with everything (except immutable memories).
902 const TypePtr* adr_type = use_mem_state->adr_type();
903 if (!C->can_alias(adr_type, load_alias_idx)) continue;
904
905 // Most slow-path runtime calls do NOT modify Java memory, but
906 // they can block and so write Raw memory.
907 if (use_mem_state->is_Mach()) {
908 MachNode* muse = use_mem_state->as_Mach();
909 if (load_alias_idx != Compile::AliasIdxRaw) {
910 // Check for call into the runtime using the Java calling
911 // convention (and from there into a wrapper); it has no
912 // _method. Can't do this optimization for Native calls because
913 // they CAN write to Java memory.
914 if (muse->ideal_Opcode() == Op_CallStaticJava) {
915 assert(muse->is_MachSafePoint(), "");
916 MachSafePointNode* ms = (MachSafePointNode*)muse;
917 assert(ms->is_MachCallJava(), "");
918 MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
919 if (mcj->_method == nullptr) {
920 // These runtime calls do not write to Java visible memory
921 // (other than Raw) and so are not anti-dependent.
922 continue;
923 }
924 }
925 // Same for SafePoints: they read/write Raw but only read otherwise.
926 // This is basically a workaround for SafePoints only defining control
927 // instead of control + memory.
928 if (muse->ideal_Opcode() == Op_SafePoint) {
929 continue;
930 }
931 } else {
932 // Some raw memory, such as the load of "top" at an allocation,
933 // can be control dependent on the previous safepoint. See
934 // comments in GraphKit::allocate_heap() about control input.
935 // Inserting an anti-dependence edge between such a safepoint and a use
936 // creates a cycle, and will cause a subsequent failure in
937 // local scheduling. (BugId 4919904)
938 // (%%% How can a control input be a safepoint and not a projection??)
939 if (muse->ideal_Opcode() == Op_SafePoint && load->in(0) == muse) {
940 continue;
941 }
942 }
943 }
944
945 // Determine the block of the use_mem_state.
946 Block* use_mem_state_block = get_block_for_node(use_mem_state);
947 assert(use_mem_state_block != nullptr,
948 "unused killing projections skipped above");
949
950 // For efficiency, we take a lazy approach to both raising the LCA and
951 // adding anti-dependence edges. In this worklist loop, we only mark blocks
952 // which we must raise the LCA above (set_raise_LCA_mark), and keep
953 // track of nodes that potentially need anti-dependence edges
954 // (non_early_stores). The only exceptions to this are if we
955 // immediately see that we have to raise the LCA all the way to the early
956 // block, and if we find stores in the early block (which always need
957 // anti-dependence edges).
958 //
959 // After the worklist loop, we perform an efficient combined LCA-raising
960 // operation over all marks and only then add anti-dependence edges where
961 // strictly necessary according to the new raised LCA.
962
963 if (use_mem_state->is_Phi()) {
964 // We have reached a memory Phi node. On our search from initial_mem to
965 // the Phi, we have found no anti-dependences (otherwise, we would have
966 // already terminated the search along this branch). Consider the example
967 // below, indicating a Phi node and its node inputs (we omit the control
968 // input).
969 //
970 // def_mem_state
971 // |
972 // | ? ?
973 // \ | /
974 // Phi
975 //
976 // We reached the Phi from def_mem_state and know that, on this
977 // particular input, the memory that the load must witness is not
978 // overwritten. However, for the Phi's other inputs (? in the
979 // illustration), we have no information and must thus conservatively
980 // assume that the load's memory is overwritten at and below the Phi.
981 //
982 // It is impossible to schedule the load before the Phi in
983 // the same block as the Phi (use_mem_state_block), and anti-dependence
984 // edges are, therefore, redundant. We must, however, find the
985 // predecessor block of use_mem_state_block that corresponds to
986 // def_mem_state, and raise the LCA above that block. Note that this block
987 // is not necessarily def_mem_state's block! See the continuation of our
988 // previous example below (now illustrating blocks instead of nodes)
989 //
990 // def_mem_state's block
991 // |
992 // |
993 // pred_block
994 // |
995 // | ? ?
996 // | | |
997 // use_mem_state_block
998 //
999 // Here, we must raise the LCA above pred_block rather than
1000 // def_mem_state's block.
1001 //
1002 // Do not assert(use_mem_state_block != early, "Phi merging memory after access"):
1003 // a PhiNode may be at the start of block 'early' with a backedge to 'early'.
1004 if (LCA == early) {
1005 // Don't bother if LCA is already raised all the way
1006 continue;
1007 }
1008 DEBUG_ONLY(bool found_match = false);
1009 for (uint j = PhiNode::Input, jmax = use_mem_state->req(); j < jmax; j++) {
1010 if (use_mem_state->in(j) == def_mem_state) { // Found matching input?
1011 DEBUG_ONLY(found_match = true);
1012 Block* pred_block = get_block_for_node(use_mem_state_block->pred(j));
1013 if (pred_block != early) {
1014 // Lazily set the LCA mark
1015 pred_block->set_raise_LCA_mark(load_index);
1016 must_raise_LCA_above_marks = true;
1017 } else /* if (pred_block == early) */ {
1018 // We know already now that we must raise LCA all the way to early.
1019 LCA = early;
1020 // This turns off the process of gathering non_early_stores.
1021 }
1022 }
1023 }
1024 assert(found_match, "no worklist bug");
1025 } else if (use_mem_state_block != early) {
1026 // We found an anti-dependent store outside the load's 'early' block. The
1027 // store may be between the current LCA and the earliest possible block
1028 // (but it could very well also be on a distinct control-flow path).
1029 // Lazily set the LCA mark and push to non_early_stores.
1030 if (LCA == early) {
1031 // Don't bother if LCA is already raised all the way
1032 continue;
1033 }
1034 if (unrelated_load_in_store_null_block(use_mem_state, load)) {
1035 continue;
1036 }
1037 use_mem_state_block->set_raise_LCA_mark(load_index);
1038 must_raise_LCA_above_marks = true;
1039 non_early_stores.push(use_mem_state);
1040 } else /* if (use_mem_state_block == early) */ {
1041 // We found an anti-dependent store in the load's 'early' block.
1042 // Therefore, we know already now that we must raise LCA all the way to
1043 // early and that we need to add an anti-dependence edge to the store.
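// The precedence edge added below would create a cycle if the store were also the load's control;
// the assert rules that case out.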
1044 assert(use_mem_state != load->find_exact_control(load->in(0)), "dependence cycle found");
1045 if (verify) {
1046 assert(use_mem_state->find_edge(load) != -1 || unrelated_load_in_store_null_block(use_mem_state, load),
1047 "missing precedence edge");
1048 } else {
1049 use_mem_state->add_prec(load);
1050 }
1051 LCA = early;
1052 // This turns off the process of gathering non_early_stores.
1053 }
1054 }
1055 // Worklist is now empty; we have visited all possible anti-dependences.
1056
1057 // Finished if 'load' must be scheduled in its 'early' block.
1058 // If we found any stores there, they have already been given
1059 // anti-dependence edges.
1060 if (LCA == early) {
1061 return LCA;
1062 }
1063
1064 // We get here only if there are no anti-dependent stores in the load's
1065 // 'early' block and if no memory Phi has forced LCA to the early block. Now
1066 // we must raise the LCA above the blocks for all the anti-dependent stores
1067 // and above the predecessor blocks of anti-dependent memory Phis we reached
1068 // during the search.
1069 if (must_raise_LCA_above_marks) {
1070 LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
1071 }
1072
1073 // If LCA == early at this point, there were no stores that required
1074 // anti-dependence edges in the early block. Otherwise, we would have eagerly
1075 // raised the LCA to early already in the worklist loop.
1076 if (LCA == early) {
1077 return LCA;
1078 }
1079
1080 // The raised LCA block can now be a home to anti-dependent stores for which
1081 // we still need to add anti-dependence edges, but no LCA predecessor block
1082 // contains any such stores (otherwise, we would have raised the LCA even
1083 // higher).
1084 //
1085 // The raised LCA will be a lower bound for placing the load, preventing the
1086 // load from sinking past any block containing a store that may overwrite
1087 // memory that the load must witness.
1088 //
1089 // Now we need to insert the necessary anti-dependence edges from 'load' to
1090 // each store in the non-early LCA block. We have recorded all such potential
1091 // stores in non_early_stores.
1092 //
1093 // If LCA->raise_LCA_mark() != load_index, it means that we raised the LCA to
1094 // a block in which we did not find any anti-dependent stores. So, no need to
1095 // search for any such stores.
1096 if (LCA->raise_LCA_mark() == load_index) {
1097 while (non_early_stores.size() > 0) {
1098 Node* store = non_early_stores.pop();
1099 Block* store_block = get_block_for_node(store);
1100 if (store_block == LCA) {
1101 // Add anti-dependence edge from the load to the store in the non-early
1102 // LCA.
1103 assert(store != load->find_exact_control(load->in(0)), "dependence cycle found");
1104 if (verify) {
1105 assert(store->find_edge(load) != -1, "missing precedence edge");
1106 } else {
1107 store->add_prec(load);
1108 }
1109 } else {
1110 assert(store_block->raise_LCA_mark() == load_index, "block was marked");
1111 }
1112 }
1113 }
1114
1115 assert(LCA->dominates(LCA_orig), "unsound updated LCA");
1116
1117 // Return the highest block containing stores; any stores
1118 // within that block have been given anti-dependence edges.
1119 return LCA;
1120 }
1121
1122 // This class is used to iterate backwards over the nodes in the graph.
1123
1124 class Node_Backward_Iterator {
1125
1126 private:
1127 Node_Backward_Iterator();
1128
1129 public:
1130 // Constructor for the iterator
1131 Node_Backward_Iterator(Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg);
1132
1133 // Return the next node in the backward traversal, or null when the traversal is finished
1134 Node *next();
1135
1136 private:
1137 VectorSet &_visited;
1138 Node_Stack &_stack;
1139 PhaseCFG &_cfg;
1140 };
1141
1142 // Constructor for the Node_Backward_Iterator
1143 Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg)
1144 : _visited(visited), _stack(stack), _cfg(cfg) {
1145 // The stack should contain exactly the root
1146 stack.clear();
1147 stack.push(root, root->outcnt());
1148
1149 // Clear the visited bits
1150 visited.clear();
1151 }
1152
1153 // Advance the iterator and return the next node in the backward traversal
1154 Node *Node_Backward_Iterator::next() {
1155
1156 // If the _stack is empty, then just return null: finished.
1157 if ( !_stack.size() )
1158 return nullptr;
1159
1160 // I visit unvisited not-anti-dependence users first, then anti-dependent
1161 // children next. I iterate backwards to support removal of nodes.
1162 // The stack holds states consisting of 3 values:
1163 // current Def node, flag which indicates 1st/2nd pass, index of current out edge
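// The 1st/2nd-pass flag is packed into the low bit of the stacked node pointer (node pointers are
// word-aligned, so the bit is otherwise unused); the current out-edge index is the stack's index slot.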
1164 Node *self = (Node*)(((uintptr_t)_stack.node()) & ~1);
1165 bool iterate_anti_dep = (((uintptr_t)_stack.node()) & 1);
1166 uint idx = MIN2(_stack.index(), self->outcnt()); // Support removal of nodes.
1167 _stack.pop();
1168
1169 // I cycle here when I am entering a deeper level of recursion.
1170 // The key variable 'self' was set prior to jumping here.
1171 while( 1 ) {
1172
1173 _visited.set(self->_idx);
1174
1175 // Now schedule all uses as late as possible.
1176 const Node* src = self->is_Proj() ? self->in(0) : self;
1177 uint src_rpo = _cfg.get_block_for_node(src)->_rpo;
1178
1179 // Schedule all nodes in a post-order visit
1180 Node *unvisited = nullptr; // Unvisited anti-dependent Node, if any
1181
1182 // Scan for unvisited nodes
1183 while (idx > 0) {
1184 // For all uses, schedule late
1185 Node* n = self->raw_out(--idx); // Use
1186
1187 // Skip already visited children
1188 if ( _visited.test(n->_idx) )
1189 continue;
1190
1191 // do not traverse backward control edges
1192 Node *use = n->is_Proj() ? n->in(0) : n;
1193 uint use_rpo = _cfg.get_block_for_node(use)->_rpo;
1194
1195 if ( use_rpo < src_rpo )
1196 continue;
1197
1198 // Phi nodes always precede uses in a basic block
1199 if ( use_rpo == src_rpo && use->is_Phi() )
1200 continue;
1201
1202 unvisited = n; // Found unvisited
1203
1204 // Check for possible-anti-dependent
1205 // 1st pass: No such nodes, 2nd pass: Only such nodes.
1206 if (n->needs_anti_dependence_check() == iterate_anti_dep) {
1207 unvisited = n; // Found unvisited
1208 break;
1209 }
1210 }
1211
1212 // Did I find an unvisited not-anti-dependent Node?
1213 if (!unvisited) {
1214 if (!iterate_anti_dep) {
1215 // 2nd pass: Iterate over nodes for which needs_anti_dependence_check() is true.
1216 iterate_anti_dep = true;
1217 idx = self->outcnt();
1218 continue;
1219 }
1220 break; // All done with children; post-visit 'self'
1221 }
1222
1223 // Visit the unvisited Node. Contains the obvious push to
1224 // indicate I'm entering a deeper level of recursion. I push the
1225 // old state onto the _stack and set a new state and loop (recurse).
1226 _stack.push((Node*)((uintptr_t)self | (uintptr_t)iterate_anti_dep), idx);
1227 self = unvisited;
1228 iterate_anti_dep = false;
1229 idx = self->outcnt();
1230 } // End recursion loop
1231
1232 return self;
1233 }
1234
1235 //------------------------------ComputeLatenciesBackwards----------------------
1236 // Compute the latency of all the instructions.
1237 void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_Stack &stack) {
1238 #ifndef PRODUCT
1239 if (trace_opto_pipelining())
1240 tty->print("\n#---- ComputeLatenciesBackwards ----\n");
1241 #endif
1242
1243 Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
1244 Node *n;
1245
1246 // Walk over all the nodes from last to first
1247 while ((n = iter.next())) {
1248 // Set the latency for the definitions of this instruction
1249 partial_latency_of_defs(n);
1250 }
1251 } // end ComputeLatenciesBackwards
1252
1253 //------------------------------partial_latency_of_defs------------------------
1254 // Compute the latency impact of this node on all defs. This computes
1255 // a number that increases as we approach the beginning of the routine.
1256 void PhaseCFG::partial_latency_of_defs(Node *n) {
1257 // Set the latency for this instruction
1258 #ifndef PRODUCT
1259 if (trace_opto_pipelining()) {
1260 tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
1261 dump();
1262 }
1263 #endif
1264
1265 if (n->is_Proj()) {
1266 n = n->in(0);
1267 }
1268
1269 if (n->is_Root()) {
1270 return;
1271 }
1272
1273 uint nlen = n->len();
1274 uint use_latency = get_latency_for_node(n);
1275 uint use_pre_order = get_block_for_node(n)->_pre_order;
1276
1277 for (uint j = 0; j < nlen; j++) {
1278 Node *def = n->in(j);
1279
1280 if (!def || def == n) {
1281 continue;
1282 }
1283
1284 // Walk backwards thru projections
1285 if (def->is_Proj()) {
1286 def = def->in(0);
1287 }
1288
1289 #ifndef PRODUCT
1290 if (trace_opto_pipelining()) {
1291 tty->print("# in(%2d): ", j);
1292 def->dump();
1293 }
1294 #endif
1295
1296 // If the defining block is not known, assume it is ok
1297 Block *def_block = get_block_for_node(def);
1298 uint def_pre_order = def_block ? def_block->_pre_order : 0;
1299
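// Skip backedge uses (the use's block precedes the def's block in pre-order) and Phis in the same
// block; their latency is not propagated to the def.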
1300 if ((use_pre_order < def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) {
1301 continue;
1302 }
1303
1304 uint delta_latency = n->latency(j);
1305 uint current_latency = delta_latency + use_latency;
1306
1307 if (get_latency_for_node(def) < current_latency) {
1308 set_latency_for_node(def, current_latency);
1309 }
1310
1311 #ifndef PRODUCT
1312 if (trace_opto_pipelining()) {
1313 tty->print_cr("# %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def));
1314 }
1315 #endif
1316 }
1317 }
1318
1319 //------------------------------latency_from_use-------------------------------
1320 // Compute the latency of a specific use
1321 int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
1322 // If self-reference, return no latency
1323 if (use == n || use->is_Root()) {
1324 return 0;
1325 }
1326
1327 uint def_pre_order = get_block_for_node(def)->_pre_order;
1328 uint latency = 0;
1329
1330 // If the use is not a projection, then it is simple...
1331 if (!use->is_Proj()) {
1332 #ifndef PRODUCT
1333 if (trace_opto_pipelining()) {
1334 tty->print("# out(): ");
1335 use->dump();
1336 }
1337 #endif
1338
1339 uint use_pre_order = get_block_for_node(use)->_pre_order;
1340
1341 if (use_pre_order < def_pre_order)
1342 return 0;
1343
1344 if (use_pre_order == def_pre_order && use->is_Phi())
1345 return 0;
1346
1347 uint nlen = use->len();
1348 uint nl = get_latency_for_node(use);
1349
1350 for ( uint j=0; j<nlen; j++ ) {
1351 if (use->in(j) == n) {
1352 // Change this if we want local latencies
1353 uint ul = use->latency(j);
1354 uint l = ul + nl;
1355 if (latency < l) latency = l;
1356 #ifndef PRODUCT
1357 if (trace_opto_pipelining()) {
1358 tty->print_cr("# %d + edge_latency(%d) == %d -> %d, latency = %d",
1359 nl, j, ul, l, latency);
1360 }
1361 #endif
1362 }
1363 }
1364 } else {
1365 // This is a projection, just grab the latency of the use(s)
1366 for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
1367 uint l = latency_from_use(use, def, use->fast_out(j));
1368 if (latency < l) latency = l;
1369 }
1370 }
1371
1372 return latency;
1373 }
1374
1375 //------------------------------latency_from_uses------------------------------
1376 // Compute the latency of this instruction relative to all of its uses.
1377 // This computes a number that increases as we approach the beginning of the
1378 // routine.
1379 void PhaseCFG::latency_from_uses(Node *n) {
1380 // Set the latency for this instruction
1381 #ifndef PRODUCT
1382 if (trace_opto_pipelining()) {
1383 tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
1384 dump();
1385 }
1386 #endif
1387 uint latency=0;
1388 const Node *def = n->is_Proj() ? n->in(0): n;
1389
1390 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1391 uint l = latency_from_use(n, def, n->fast_out(i));
1392
1393 if (latency < l) latency = l;
1394 }
1395
1396 set_latency_for_node(n, latency);
1397 }
1398
1399 //------------------------------is_cheaper_block-------------------------
1400 // Check if a block between the early block and the LCA block of the uses is cheaper,
1401 // according to the frequency-based policy, the latency-based policy, or the random (StressGCM) policy
1402 bool PhaseCFG::is_cheaper_block(Block* LCA, Node* self, uint target_latency,
1403 uint end_latency, double least_freq,
1404 int cand_cnt, bool in_latency) {
1405 if (StressGCM) {
1406 // Should be randomly accepted in stress mode
1407 return C->randomized_select(cand_cnt);
1408 }
1409
1410 const double delta = 1 + PROB_UNLIKELY_MAG(4);
1411
1412 // Better Frequency. Add a small delta to the comparison to not needlessly
1413 // hoist because of, e.g., small numerical inaccuracies.
1414 if (LCA->_freq * delta < least_freq) {
1415 return true;
1416 }
1417
1418 // Otherwise, choose with latency
1419 if (!in_latency && // No block containing latency
1420 LCA->_freq < least_freq * delta && // No worse frequency
1421 target_latency >= end_latency && // within latency range
1422 !self->is_iteratively_computed() // But don't hoist IV increments
1423 // because they may end up above other uses of their phi forcing
1424 // their result register to be different from their input.
1425 ) {
1426 return true;
1427 }
1428
1429 return false;
1430 }
1431
1432 //------------------------------hoist_to_cheaper_block-------------------------
1433 // Pick a block for node self, between early and LCA block of uses, that is a
1434 // cheaper alternative to LCA.
1435 Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
1436 Block* least = LCA;
1437 double least_freq = least->_freq;
1438 uint target = get_latency_for_node(self);
1439 uint start_latency = get_latency_for_node(LCA->head());
1440 uint end_latency = get_latency_for_node(LCA->get_node(LCA->end_idx()));
1441 bool in_latency = (target <= start_latency);
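// 'in_latency' means the node's latency requirement is already met at the head of the LCA block, so
// there is no latency-driven reason to hoist it higher.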
1442 const Block* root_block = get_block_for_node(_root);
1443
1444 // Turn off latency scheduling if scheduling is just plain off
1445 if (!C->do_scheduling())
1446 in_latency = true;
1447
1448 // Do not hoist (to cover latency) instructions which target a
1449 // single register. Hoisting stretches the live range of the
1450 // single register and may force spilling.
1451 MachNode* mach = self->is_Mach() ? self->as_Mach() : nullptr;
1452 if (mach != nullptr && mach->out_RegMask().is_bound1() && !mach->out_RegMask().is_empty()) {
1453 in_latency = true;
1454 }
1455
1456 #ifndef PRODUCT
1457 if (trace_opto_pipelining()) {
1458 tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self));
1459 self->dump();
1460 tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
1461 LCA->_pre_order,
1462 LCA->head()->_idx,
1463 start_latency,
1464 LCA->get_node(LCA->end_idx())->_idx,
1465 end_latency,
1466 least_freq);
1467 }
1468 #endif
1469
1470 int cand_cnt = 0; // number of candidates tried
1471
1472 // Walk up the dominator tree from the LCA (lowest common ancestor) to
1473 // the earliest legal location. Capture the block with the least execution
1474 // frequency, or one chosen by the latency-based policy, or a random block if -XX:+StressGCM
1475 while (LCA != early) {
1476 LCA = LCA->_idom; // Follow up the dominator tree
1477
1478 if (LCA == nullptr) {
1479 // Bailout without retry
1480 assert(false, "graph should be schedulable");
1481 C->record_method_not_compilable("late schedule failed: LCA is null");
1482 return least;
1483 }
1484
1485 // Don't hoist machine instructions to the root basic block
1486 if (mach != nullptr && LCA == root_block)
1487 break;
1488
1489 if (self->is_memory_writer() &&
1490 (LCA->_loop->depth() > early->_loop->depth())) {
1491 // LCA is an invalid placement for a memory writer: choosing it would
1492 // cause memory interference, as illustrated in schedule_late().
1493 continue;
1494 }
1495 verify_memory_writer_placement(LCA, self);
1496
1497 uint start_lat = get_latency_for_node(LCA->head());
1498 uint end_idx = LCA->end_idx();
1499 uint end_lat = get_latency_for_node(LCA->get_node(end_idx));
1500 double LCA_freq = LCA->_freq;
1501 #ifndef PRODUCT
1502 if (trace_opto_pipelining()) {
1503 tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
1504 LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
1505 }
1506 #endif
1507 cand_cnt++;
1508 if (is_cheaper_block(LCA, self, target, end_lat, least_freq, cand_cnt, in_latency)) {
1509 least = LCA; // Found cheaper block
1510 least_freq = LCA_freq;
1511 start_latency = start_lat;
1512 end_latency = end_lat;
1513 if (target <= start_lat)
1514 in_latency = true;
1515 }
1516 }
1517
1518 #ifndef PRODUCT
1519 if (trace_opto_pipelining()) {
1520 tty->print_cr("# Choose block B%d with start latency=%d and freq=%g",
1521 least->_pre_order, start_latency, least_freq);
1522 }
1523 #endif
1524
1525 // See if the latency needs to be updated
1526 if (target < end_latency) {
1527 #ifndef PRODUCT
1528 if (trace_opto_pipelining()) {
1529 tty->print_cr("# Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
1530 }
1531 #endif
1532 set_latency_for_node(self, end_latency);
1533 partial_latency_of_defs(self);
1534 }
1535
1536 return least;
1537 }
1538
1539
1540 //------------------------------schedule_late-----------------------------------
1541 // Now schedule all code as LATE as possible. This is the LCA in the
1542 // dominator tree of all USES of a value. Pick the block with the least
1543 // loop nesting depth that is lowest in the dominator tree.
1544 extern const char must_clone[];
1545 void PhaseCFG::schedule_late(VectorSet &visited, Node_Stack &stack) {
1546 #ifndef PRODUCT
1547 if (trace_opto_pipelining())
1548 tty->print("\n#---- schedule_late ----\n");
1549 #endif
1550
1551 Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
1552 Node *self;
1553
1554 // Walk over all the nodes from last to first
1555 while ((self = iter.next())) {
1556 Block* early = get_block_for_node(self); // Earliest legal placement
1557
1558 if (self->is_top()) {
1559 // Top node goes in bb #2 with other constants.
1560 // It must be special-cased, because it has no out edges.
1561 early->add_inst(self);
1562 continue;
1563 }
1564
1565 // No uses, just terminate
1566 if (self->outcnt() == 0) {
1567 assert(self->is_MachProj(), "sanity");
1568 continue; // Must be a dead machine projection
1569 }
1570
1571 // If node is pinned in the block, then no scheduling can be done.
1572 if( self->pinned() ) // Pinned in block?
1573 continue;
1574
1575 #ifdef ASSERT
1576 // Assert that memory writers (e.g. stores) have a "home" block (the block
1577 // given by their control input), and that this block corresponds to their
1578 // earliest possible placement. This guarantees that
1579 // hoist_to_cheaper_block() will always have at least one valid choice.
1580 if (self->is_memory_writer()) {
1581 assert(find_block_for_node(self->in(0)) == early,
1582 "The home of a memory writer must also be its earliest placement");
1583 }
1584 #endif
1585
1586 MachNode* mach = self->is_Mach() ? self->as_Mach() : nullptr;
1587 if (mach) {
1588 switch (mach->ideal_Opcode()) {
1589 case Op_CreateEx:
1590 // Don't move exception creation
1591 early->add_inst(self);
1592 continue;
1594 case Op_CastI2N:
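// Don't move CastI2N nodes; keep them in their earliest legal block.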
1595 early->add_inst(self);
1596 continue;
1597 case Op_CheckCastPP: {
1598 // Don't move CheckCastPP nodes away from their input, if the input
1599 // is a rawptr (5071820).
1600 Node *def = self->in(1);
1601 if (def != nullptr && def->bottom_type()->base() == Type::RawPtr) {
1602 early->add_inst(self);
1603 #ifdef ASSERT
1604 _raw_oops.push(def);
1605 #endif
1606 continue;
1607 }
1608 break;
1609 }
1610 default:
1611 break;
1612 }
1613 if (C->has_irreducible_loop() && self->is_memory_writer()) {
1614 // If the CFG is irreducible, place memory writers in their home block.
1615 // This prevents hoist_to_cheaper_block() from accidentally placing such
1616 // nodes into deeper loops, as in the following example:
1617 //
1618 // Home placement of store in B1 (loop L1):
1619 //
1620 // B1 (L1):
1621 // m1 <- ..
1622 // m2 <- store m1, ..
1623 // B2 (L2):
1624 // jump B2
1625 // B3 (L1):
1626 // .. <- .. m2, ..
1627 //
1628 // Wrong "hoisting" of store to B2 (in loop L2, child of L1):
1629 //
1630 // B1 (L1):
1631 // m1 <- ..
1632 // B2 (L2):
1633 // m2 <- store m1, ..
1634 // # Wrong: m1 and m2 interfere at this point.
1635 // jump B2
1636 // B3 (L1):
1637 // .. <- .. m2, ..
1638 //
1639 // This "hoist inversion" can happen due to different factors such as
1640 // inaccurate estimation of frequencies for irreducible CFGs, and loops
1641 // with always-taken exits in reducible CFGs. In the reducible case,
1642 // hoist inversion is prevented by discarding invalid blocks (those in
1643 // deeper loops than the home block). In the irreducible case, the
1644 // invalid blocks cannot be identified due to incomplete loop nesting
1645 // information, hence a conservative solution is taken.
1646 #ifndef PRODUCT
1647 if (trace_opto_pipelining()) {
1648 tty->print_cr("# Irreducible loops: schedule in home block B%d:",
1649 early->_pre_order);
1650 self->dump();
1651 }
1652 #endif
1653 schedule_node_into_block(self, early);
1654 continue;
1655 }
1656 }
1657
1658 // Gather LCA of all uses
1659 Block *LCA = nullptr;
1660 {
1661 for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
1662 // For all uses, find LCA
1663 Node* use = self->fast_out(i);
1664 LCA = raise_LCA_above_use(LCA, use, self, this);
1665 }
1666 guarantee(LCA != nullptr, "There must be a LCA");
1667 } // (Hide defs of imax, i from rest of block.)
1668
1669 // Place temps in the block of their use. This isn't a
1670 // requirement for correctness but it reduces useless
1671 // interference between temps and other nodes.
1672 if (mach != nullptr && mach->is_MachTemp()) {
1673 map_node_to_block(self, LCA);
1674 LCA->add_inst(self);
1675 continue;
1676 }
1677
1678 // Check if 'self' could be anti-dependent on memory
1679 if (self->needs_anti_dependence_check()) {
1680 // Hoist LCA above possible-defs and insert anti-dependences to
1681 // defs in new LCA block.
1682 LCA = raise_above_anti_dependences(LCA, self);
1683 if (C->failing()) {
1684 return;
1685 }
1686 }
1687
1688 if (early->_dom_depth > LCA->_dom_depth) {
1689 // Somehow the LCA has moved above the earliest legal point.
1690 // (One way this can happen is via memory_early_block.)
1691 if (C->subsume_loads() && !C->failing()) {
1692 // Retry with subsume_loads == false
1693 // If this is the first failure, the sentinel string will "stick"
1694 // to the Compile object, and the C2Compiler will see it and retry.
1695 C->record_failure(C2Compiler::retry_no_subsuming_loads());
1696 } else {
1697 // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
1698 assert(C->failure_is_artificial(), "graph should be schedulable");
1699 C->record_method_not_compilable("late schedule failed: incorrect graph" DEBUG_ONLY(COMMA true));
1700 }
1701 return;
1702 }
1703
1704 if (self->is_memory_writer()) {
1705 // If the LCA of a memory writer is a descendant of its home loop, hoist
1706 // it into a valid placement.
1707 while (LCA->_loop->depth() > early->_loop->depth()) {
1708 LCA = LCA->_idom;
1709 }
1710 assert(LCA != nullptr, "a valid LCA must exist");
1711 verify_memory_writer_placement(LCA, self);
1712 }
1713
1714 // If there is no opportunity to hoist, then we're done.
1715 // In stress mode, try to hoist even the single operations.
1716 bool try_to_hoist = StressGCM || (LCA != early);
1717
1718 // Must clone guys stay next to use; no hoisting allowed.
1719 // Also cannot hoist guys that alter memory or are otherwise not
1720 // allocatable (hoisting can make a value live longer, leading to
1721 // anti and output dependency problems which are normally resolved
1722 // by the register allocator giving everyone a different register).
1723 if (mach != nullptr && must_clone[mach->ideal_Opcode()])
1724 try_to_hoist = false;
1725
1726 Block* late = nullptr;
1727 if (try_to_hoist) {
1728 // Now find the block with the least execution frequency.
1729 // Start at the latest schedule and work up to the earliest schedule
1730 // in the dominator tree. Thus the Node will dominate all its uses.
1731 late = hoist_to_cheaper_block(LCA, early, self);
1732 } else {
1733 // Just use the LCA of the uses.
1734 late = LCA;
1735 }
1736
1737 // Put the node into target block
1738 schedule_node_into_block(self, late);
1739
1740 #ifdef ASSERT
1741 if (self->needs_anti_dependence_check()) {
1742 // since precedence edges are only inserted when we're sure they
1743 // are needed make sure that after placement in a block we don't
1744 // need any new precedence edges.
1745 verify_anti_dependences(late, self);
1746 }
1747 #endif
1748 } // Loop until all nodes have been visited
1749
1750 } // end ScheduleLate
1751
1752 //------------------------------GlobalCodeMotion-------------------------------
1753 void PhaseCFG::global_code_motion() {
1754 ResourceMark rm;
1755
1756 #ifndef PRODUCT
1757 if (trace_opto_pipelining()) {
1758 tty->print("\n---- Start GlobalCodeMotion ----\n");
1759 }
1760 #endif
1761
1762 // Initialize the node to block mapping for things on the proj_list
1763 for (uint i = 0; i < _matcher.number_of_projections(); i++) {
1764 unmap_node_from_block(_matcher.get_projection(i));
1765 }
1766
1767 // Set the basic block for Nodes pinned into blocks
1768 VectorSet visited;
1769 schedule_pinned_nodes(visited);
1770
1771 // Find the earliest Block any instruction can be placed in. Some
1772 // instructions are pinned into Blocks. Unpinned instructions can
1773 // appear in the last block in which all their inputs occur.
1774 visited.clear();
1775 Node_Stack stack((C->live_nodes() >> 2) + 16); // pre-grow
1776 if (!schedule_early(visited, stack)) {
1777 // Bailout without retry
1778 assert(C->failure_is_artificial(), "early schedule failed");
1779 C->record_method_not_compilable("early schedule failed" DEBUG_ONLY(COMMA true));
1780 return;
1781 }
1782
1783 // Build Def-Use edges.
1784 // Compute the latency information (via backwards walk) for all the
1785 // instructions in the graph
1786 _node_latency = new GrowableArray<uint>(); // resource_area allocation
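// (Accessed via get_latency_for_node()/set_latency_for_node() throughout scheduling.)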
1787
1788 if (C->do_scheduling()) {
1789 compute_latencies_backwards(visited, stack);
1790 }
1791
1792 // Now schedule all code as LATE as possible. This is the LCA in the
1793 // dominator tree of all USES of a value. Pick the block with the least
1794 // loop nesting depth that is lowest in the dominator tree.
1795 // ( visited.clear() called in schedule_late()->Node_Backward_Iterator() )
1796 schedule_late(visited, stack);
1797 if (C->failing()) {
1798 return;
1799 }
1800
1801 #ifndef PRODUCT
1802 if (trace_opto_pipelining()) {
1803 tty->print("\n---- Detect implicit null checks ----\n");
1804 }
1805 #endif
1806
1807 // Detect implicit-null-check opportunities. Basically, find null checks
1808 // with suitable memory ops nearby. Use the memory op to do the null check.
1809 // I can generate a memory op if there is not one nearby.
1810 if (C->is_method_compilation()) {
1811 // By reversing the loop direction we get a very minor gain on mpegaudio.
1812 // Feel free to revert to a forward loop for clarity.
1813 // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
1814 for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
1815 Node* proj = _matcher._null_check_tests[i];
1816 Node* val = _matcher._null_check_tests[i + 1];
1817 Block* block = get_block_for_node(proj);
1818 implicit_null_check(block, proj, val, C->allowed_deopt_reasons());
1819 // The implicit_null_check will only perform the transformation
1820 // if the null branch is truly uncommon, *and* it leads to an
1821 // uncommon trap. Combined with the too_many_traps guards
1822 // above, this prevents SEGV storms reported in 6366351,
1823 // by recompiling offending methods without this optimization.
1824 if (C->failing()) {
1825 return;
1826 }
1827 }
1828 }
1829
1830 bool block_size_threshold_ok = false;
1831 intptr_t *recalc_pressure_nodes = nullptr;
1832 if (OptoRegScheduling) {
1833 for (uint i = 0; i < number_of_blocks(); i++) {
1834 Block* block = get_block(i);
1835 if (block->number_of_nodes() > 10) {
1836 block_size_threshold_ok = true;
1837 break;
1838 }
1839 }
1840 }
1841
1842 // Register-pressure-aware local scheduling is enabled only when OptoRegScheduling is set
1843 // and at least one block is large enough (block_size_threshold_ok) to make it worthwhile.
1844 PhaseChaitin regalloc(C->unique(), *this, _matcher, true);
1845 ResourceArea live_arena(mtCompiler, Arena::Tag::tag_reglive); // Arena for liveness
1846 ResourceMark rm_live(&live_arena);
1847 PhaseLive live(*this, regalloc._lrg_map.names(), &live_arena, true);
1848 PhaseIFG ifg(&live_arena);
1849 if (OptoRegScheduling && block_size_threshold_ok) {
1850 regalloc.mark_ssa();
1851 Compile::TracePhase tp(_t_computeLive);
1852 rm_live.reset_to_mark(); // Reclaim working storage
1853 IndexSet::reset_memory(C, &live_arena);
1854 uint node_size = regalloc._lrg_map.max_lrg_id();
1855 ifg.init(node_size); // Empty IFG
1856 regalloc.set_ifg(ifg);
1857 regalloc.set_live(live);
1858 regalloc.gather_lrg_masks(false); // Collect LRG masks
1859 live.compute(node_size); // Compute liveness
1860
1861 recalc_pressure_nodes = NEW_RESOURCE_ARRAY(intptr_t, node_size);
1862 for (uint i = 0; i < node_size; i++) {
1863 recalc_pressure_nodes[i] = 0;
1864 }
1865 }
1866 _regalloc = &regalloc;
1867
1868 #ifndef PRODUCT
1869 if (trace_opto_pipelining()) {
1870 tty->print("\n---- Start Local Scheduling ----\n");
1871 }
1872 #endif
1873
1874 // Schedule locally. Right now a simple topological sort.
1875 // Later, do a real latency aware scheduler.
1876 GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
1877 visited.reset();
1878 for (uint i = 0; i < number_of_blocks(); i++) {
1879 Block* block = get_block(i);
1880 if (!schedule_local(block, ready_cnt, visited, recalc_pressure_nodes)) {
1881 if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
1882 assert(C->failure_is_artificial(), "local schedule failed");
1883 C->record_method_not_compilable("local schedule failed" DEBUG_ONLY(COMMA true));
1884 }
1885 _regalloc = nullptr;
1886 return;
1887 }
1888 }
1889 _regalloc = nullptr;
1890
1891 // If we inserted any instructions between a Call and its CatchNode,
1892 // clone the instructions on all paths below the Catch.
1893 for (uint i = 0; i < number_of_blocks(); i++) {
1894 Block* block = get_block(i);
1895 call_catch_cleanup(block);
1896 if (C->failing()) {
1897 return;
1898 }
1899 }
1900
1901 #ifndef PRODUCT
1902 if (trace_opto_pipelining()) {
1903 tty->print("\n---- After GlobalCodeMotion ----\n");
1904 for (uint i = 0; i < number_of_blocks(); i++) {
1905 Block* block = get_block(i);
1906 block->dump();
1907 }
1908 }
1909 #endif
1910 // Dead. Poison the pointer so any stale use of the latency info will fault.
1911 _node_latency = (GrowableArray<uint> *)((intptr_t)0xdeadbeef);
1912 }
1913
1914 bool PhaseCFG::do_global_code_motion() {
1915
1916 build_dominator_tree();
1917 if (C->failing()) {
1918 return false;
1919 }
1920
1921 NOT_PRODUCT( C->verify_graph_edges(); )
1922
1923 estimate_block_frequency();
1924
1925 global_code_motion();
1926
1927 if (C->failing()) {
1928 return false;
1929 }
1930
1931 return true;
1932 }
1933
1934 //------------------------------Estimate_Block_Frequency-----------------------
1935 // Estimate block frequencies based on IfNode probabilities.
1936 void PhaseCFG::estimate_block_frequency() {
1937
1938 // Force conditional branches leading to uncommon traps to be unlikely,
1939 // not because we get to the uncommon_trap with less relative frequency,
1940 // but because an uncommon_trap typically causes a deopt, so we only get
1941 // there once.
1942 if (C->do_freq_based_layout()) {
1943 Block_List worklist;
1944 Block* root_blk = get_block(0);
1945 for (uint i = 1; i < root_blk->num_preds(); i++) {
1946 Block *pb = get_block_for_node(root_blk->pred(i));
1947 if (pb->has_uncommon_code()) {
1948 worklist.push(pb);
1949 }
1950 }
1951 while (worklist.size() > 0) {
1952 Block* uct = worklist.pop();
1953 if (uct == get_root_block()) {
1954 continue;
1955 }
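// Walk predecessors: a block whose only successor is this uncommon block is itself
// uncommon; at a two-way branch, lower the probability of the path leading here.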
1956 for (uint i = 1; i < uct->num_preds(); i++) {
1957 Block *pb = get_block_for_node(uct->pred(i));
1958 if (pb->_num_succs == 1) {
1959 worklist.push(pb);
1960 } else if (pb->num_fall_throughs() == 2) {
1961 pb->update_uncommon_branch(uct);
1962 }
1963 }
1964 }
1965 }
1966
1967 // Create the loop tree and calculate loop depth.
1968 _root_loop = create_loop_tree();
1969 _root_loop->compute_loop_depth(0);
1970
1971 // Compute block frequency of each block, relative to a single loop entry.
1972 _root_loop->compute_freq();
1973
1974 // Adjust all frequencies to be relative to a single method entry
1975 _root_loop->_freq = 1.0;
1976 _root_loop->scale_freq();
1977
1978 // Save the outermost loop frequency for the LRG frequency threshold
1979 _outer_loop_frequency = _root_loop->outer_loop_freq();
1980
1981 // force paths ending at uncommon traps to be infrequent
1982 if (!C->do_freq_based_layout()) {
1983 Block_List worklist;
1984 Block* root_blk = get_block(0);
1985 for (uint i = 1; i < root_blk->num_preds(); i++) {
1986 Block *pb = get_block_for_node(root_blk->pred(i));
1987 if (pb->has_uncommon_code()) {
1988 worklist.push(pb);
1989 }
1990 }
1991 while (worklist.size() > 0) {
1992 Block* uct = worklist.pop();
1993 uct->_freq = PROB_MIN;
1994 for (uint i = 1; i < uct->num_preds(); i++) {
1995 Block *pb = get_block_for_node(uct->pred(i));
1996 if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
1997 worklist.push(pb);
1998 }
1999 }
2000 }
2001 }
2002
2003 #ifdef ASSERT
2004 for (uint i = 0; i < number_of_blocks(); i++) {
2005 Block* b = get_block(i);
2006 assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
2007 }
2008 #endif
2009
2010 #ifndef PRODUCT
2011 if (PrintCFGBlockFreq) {
2012 tty->print_cr("CFG Block Frequencies");
2013 _root_loop->dump_tree();
2014 if (Verbose) {
2015 tty->print_cr("PhaseCFG dump");
2016 dump();
2017 tty->print_cr("Node dump");
2018 _root->dump(99999);
2019 }
2020 }
2021 #endif
2022 }
2023
2024 //----------------------------create_loop_tree--------------------------------
2025 // Create a loop tree from the CFG
2026 CFGLoop* PhaseCFG::create_loop_tree() {
2027
2028 #ifdef ASSERT
2029 assert(get_block(0) == get_root_block(), "first block should be root block");
2030 for (uint i = 0; i < number_of_blocks(); i++) {
2031 Block* block = get_block(i);
2032 // Check that the _loop fields are clear...we could clear them if not.
2033 assert(block->_loop == nullptr, "clear _loop expected");
2034 // Sanity check that the RPO numbering is reflected in the _blocks array.
2035 // It doesn't have to be for the loop tree to be built, but if it is not,
2036 // then the blocks have been reordered since dominator graph building...which
2037 // casts doubt on the RPO numbering
2038 assert(block->_rpo == i, "unexpected reverse post order number");
2039 }
2040 #endif
2041
2042 int idct = 0;
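// The root loop is the method's pseudo loop (depth 0); blocks not in any real loop are assigned to it.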
2043 CFGLoop* root_loop = new CFGLoop(idct++);
2044
2045 Block_List worklist;
2046
2047 // Assign blocks to loops
2048 for(uint i = number_of_blocks() - 1; i > 0; i-- ) { // skip Root block
2049 Block* block = get_block(i);
2050
2051 if (block->head()->is_Loop()) {
2052 Block* loop_head = block;
2053 assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
2054 Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
2055 Block* tail = get_block_for_node(tail_n);
2056
2057 // Defensively filter out Loop nodes for non-single-entry loops.
2058 // For all reasonable loops, the head occurs before the tail in RPO.
2059 if (i <= tail->_rpo) {
2060
2061 // The tail and (recursive) predecessors of the tail
2062 // are made members of a new loop.
2063
2064 assert(worklist.size() == 0, "nonempty worklist");
2065 CFGLoop* nloop = new CFGLoop(idct++);
2066 assert(loop_head->_loop == nullptr, "just checking");
2067 loop_head->_loop = nloop;
2068 // Add to nloop so push_pred() will skip over inner loops
2069 nloop->add_member(loop_head);
2070 nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);
2071
2072 while (worklist.size() > 0) {
2073 Block* member = worklist.pop();
2074 if (member != loop_head) {
2075 for (uint j = 1; j < member->num_preds(); j++) {
2076 nloop->push_pred(member, j, worklist, this);
2077 }
2078 }
2079 }
2080 }
2081 }
2082 }
2083
2084 // Create a member list for each loop consisting
2085 // of both blocks and (immediate child) loops.
2086 for (uint i = 0; i < number_of_blocks(); i++) {
2087 Block* block = get_block(i);
2088 CFGLoop* lp = block->_loop;
2089 if (lp == nullptr) {
2090 // Not assigned to a loop. Add it to the method's pseudo loop.
2091 block->_loop = root_loop;
2092 lp = root_loop;
2093 }
2094 if (lp == root_loop || block != lp->head()) { // loop heads are already members
2095 lp->add_member(block);
2096 }
2097 if (lp != root_loop) {
2098 if (lp->parent() == nullptr) {
2099 // Not a nested loop. Make it a child of the method's pseudo loop.
2100 root_loop->add_nested_loop(lp);
2101 }
2102 if (block == lp->head()) {
2103 // Add nested loop to member list of parent loop.
2104 lp->parent()->add_member(lp);
2105 }
2106 }
2107 }
2108
2109 return root_loop;
2110 }
2111
2112 //------------------------------push_pred--------------------------------------
2113 void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
2114 Node* pred_n = blk->pred(i);
2115 Block* pred = cfg->get_block_for_node(pred_n);
2116 CFGLoop *pred_loop = pred->_loop;
2117 if (pred_loop == nullptr) {
2118 // Filter out blocks for non-single-entry loops.
2119 // For all reasonable loops, the head occurs before the tail in RPO.
2120 if (pred->_rpo > head()->_rpo) {
2121 pred->_loop = this;
2122 worklist.push(pred);
2123 }
2124 } else if (pred_loop != this) {
2125 // Nested loop.
2126 while (pred_loop->_parent != nullptr && pred_loop->_parent != this) {
2127 pred_loop = pred_loop->_parent;
2128 }
2129 // Make pred's loop be a child
2130 if (pred_loop->_parent == nullptr) {
2131 add_nested_loop(pred_loop);
2132 // Continue with loop entry predecessor.
2133 Block* pred_head = pred_loop->head();
2134 assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
2135 assert(pred_head != head(), "loop head in only one loop");
2136 push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
2137 } else {
2138 assert(pred_loop->_parent == this && _parent == nullptr, "just checking");
2139 }
2140 }
2141 }
2142
2143 //------------------------------add_nested_loop--------------------------------
2144 // Make cl a child of the current loop in the loop tree.
2145 void CFGLoop::add_nested_loop(CFGLoop* cl) {
2146 assert(_parent == nullptr, "no parent yet");
2147 assert(cl != this, "not my own parent");
2148 cl->_parent = this;
2149 CFGLoop* ch = _child;
2150 if (ch == nullptr) {
2151 _child = cl;
2152 } else {
2153 while (ch->_sibling != nullptr) { ch = ch->_sibling; }
2154 ch->_sibling = cl;
2155 }
2156 }
2157
2158 //------------------------------compute_loop_depth-----------------------------
2159 // Store the loop depth in each CFGLoop object.
2160 // Recursively walk the children to do the same for them.
2161 void CFGLoop::compute_loop_depth(int depth) {
2162 _depth = depth;
2163 CFGLoop* ch = _child;
2164 while (ch != nullptr) {
2165 ch->compute_loop_depth(depth + 1);
2166 ch = ch->_sibling;
2167 }
2168 }
2169
2170 //------------------------------compute_freq-----------------------------------
2171 // Compute the frequency of each block and loop, relative to a single entry
2172 // into the dominating loop head.
2173 void CFGLoop::compute_freq() {
2174 // Bottom up traversal of loop tree (visit inner loops first.)
2175 // Set loop head frequency to 1.0, then transitively
2176 // compute frequency for all successors in the loop,
2177 // as well as for each exit edge. Inner loops are
2178 // treated as single blocks with loop exit targets
2179 // as the successor blocks.
2180
2181 // Nested loops first
2182 CFGLoop* ch = _child;
2183 while (ch != nullptr) {
2184 ch->compute_freq();
2185 ch = ch->_sibling;
2186 }
2187 assert (_members.length() > 0, "no empty loops");
2188 Block* hd = head();
2189 hd->_freq = 1.0;
2190 for (int i = 0; i < _members.length(); i++) {
2191 CFGElement* s = _members.at(i);
2192 double freq = s->_freq;
2193 if (s->is_block()) {
2194 Block* b = s->as_Block();
2195 for (uint j = 0; j < b->_num_succs; j++) {
2196 Block* sb = b->_succs[j];
2197 update_succ_freq(sb, freq * b->succ_prob(j));
2198 }
2199 } else {
2200 CFGLoop* lp = s->as_CFGLoop();
2201 assert(lp->_parent == this, "immediate child");
2202 for (int k = 0; k < lp->_exits.length(); k++) {
2203 Block* eb = lp->_exits.at(k).get_target();
2204 double prob = lp->_exits.at(k).get_prob();
2205 update_succ_freq(eb, freq * prob);
2206 }
2207 }
2208 }
2209
2210 // For all loops other than the outer, "method" loop,
2211 // sum and normalize the exit probability. The "method" loop
2212 // should keep the initial exit probability of 1, so that
2213 // inner blocks do not get erroneously scaled.
2214 if (_depth != 0) {
2215 // Total the exit probabilities for this loop.
2216 double exits_sum = 0.0f;
2217 for (int i = 0; i < _exits.length(); i++) {
2218 exits_sum += _exits.at(i).get_prob();
2219 }
2220
2221 // Normalize the exit probabilities. Until now, the
2222 // probabilities estimate the probability of exit per
2223 // single loop iteration; afterward, they estimate
2224 // the probability of exit per loop entry.
2225 for (int i = 0; i < _exits.length(); i++) {
2226 Block* et = _exits.at(i).get_target();
2227 float new_prob = 0.0f;
2228 if (_exits.at(i).get_prob() > 0.0f) {
2229 new_prob = _exits.at(i).get_prob() / exits_sum;
2230 }
2231 BlockProbPair bpp(et, new_prob);
2232 _exits.at_put(i, bpp);
2233 }
2234
2235 // Save the total, but guard against unreasonable probability,
2236 // as the value is used to estimate the loop trip count.
2237 // An infinite trip count would blur relative block
2238 // frequencies.
2239 if (exits_sum > 1.0f) exits_sum = 1.0;
2240 if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
2241 _exit_prob = exits_sum;
2242 }
2243 }
2244
2245 //------------------------------succ_prob-------------------------------------
2246 // Determine the probability of reaching successor 'i' from the receiver block.
2247 float Block::succ_prob(uint i) {
2248 int eidx = end_idx();
2249 Node *n = get_node(eidx); // Get ending Node
2250
2251 int op = n->Opcode();
2252 if (n->is_Mach()) {
2253 if (n->is_MachNullCheck()) {
2254 // Can only reach here if called after lcm. The original Op_If is gone,
2255 // so we attempt to infer the probability from one or both of the
2256 // successor blocks.
2257 assert(_num_succs == 2, "expecting 2 successors of a null check");
2258 // If either successor has only one predecessor, then the
2259 // probability estimate can be derived using the
2260 // relative frequency of the successor and this block.
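// (num_preds() == 2 means exactly one CFG predecessor, since pred indices start at 1.)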
2261 if (_succs[i]->num_preds() == 2) {
2262 return _succs[i]->_freq / _freq;
2263 } else if (_succs[1-i]->num_preds() == 2) {
2264 return 1 - (_succs[1-i]->_freq / _freq);
2265 } else {
2266 // Estimate using both successor frequencies
2267 float freq = _succs[i]->_freq;
2268 return freq / (freq + _succs[1-i]->_freq);
2269 }
2270 }
2271 op = n->as_Mach()->ideal_Opcode();
2272 }
2273
2274
2275 // Switch on branch type
2276 switch( op ) {
2277 case Op_CountedLoopEnd:
2278 case Op_If: {
2279 assert (i < 2, "just checking");
2280 // Conditionals pass on only part of their frequency
2281 float prob = n->as_MachIf()->_prob;
2282 assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
2283 // If succ[i] is the FALSE branch, invert path info
2284 if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
2285 return 1.0f - prob; // not taken
2286 } else {
2287 return prob; // taken
2288 }
2289 }
2290
2291 case Op_Jump:
2292 return n->as_MachJump()->_probs[get_node(i + eidx + 1)->as_JumpProj()->_con];
2293
2294 case Op_Catch: {
2295 const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
2296 if (ci->_con == CatchProjNode::fall_through_index) {
2297 // Fall-thru path gets the lion's share.
2298 return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
2299 } else {
2300 // Presume exceptional paths are equally unlikely
2301 return PROB_UNLIKELY_MAG(5);
2302 }
2303 }
2304
2305 case Op_Root:
2306 case Op_Goto:
2307 // Pass frequency straight thru to target
2308 return 1.0f;
2309
2310 case Op_NeverBranch: {
2311 Node* succ = n->as_NeverBranch()->proj_out(0)->unique_ctrl_out();
2312 if (_succs[i]->head() == succ) {
2313 return 1.0f;
2314 }
2315 return 0.0f;
2316 }
2317
2318 case Op_TailCall:
2319 case Op_TailJump:
2320 case Op_ForwardException:
2321 case Op_Return:
2322 case Op_Halt:
2323 case Op_Rethrow:
2324 // Do not push out freq to root block
2325 return 0.0f;
2326
2327 default:
2328 ShouldNotReachHere();
2329 }
2330
2331 return 0.0f;
2332 }
2333
2334 //------------------------------num_fall_throughs-----------------------------
2335 // Return the number of fall-through candidates for a block
2336 int Block::num_fall_throughs() {
2337 int eidx = end_idx();
2338 Node *n = get_node(eidx); // Get ending Node
2339
2340 int op = n->Opcode();
2341 if (n->is_Mach()) {
2342 if (n->is_MachNullCheck()) {
2343 // In theory either side can fall through; for simplicity's sake,
2344 // assume only the false branch can for now.
2345 return 1;
2346 }
2347 op = n->as_Mach()->ideal_Opcode();
2348 }
2349
2350 // Switch on branch type
2351 switch( op ) {
2352 case Op_CountedLoopEnd:
2353 case Op_If:
2354 return 2;
2355
2356 case Op_Root:
2357 case Op_Goto:
2358 return 1;
2359
2360 case Op_Catch: {
2361 for (uint i = 0; i < _num_succs; i++) {
2362 const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
2363 if (ci->_con == CatchProjNode::fall_through_index) {
2364 return 1;
2365 }
2366 }
2367 return 0;
2368 }
2369
2370 case Op_Jump:
2371 case Op_NeverBranch:
2372 case Op_TailCall:
2373 case Op_TailJump:
2374 case Op_ForwardException:
2375 case Op_Return:
2376 case Op_Halt:
2377 case Op_Rethrow:
2378 return 0;
2379
2380 default:
2381 ShouldNotReachHere();
2382 }
2383
2384 return 0;
2385 }
2386
2387 //------------------------------succ_fall_through-----------------------------
2388 // Return true if a specific successor could be fall-through target.
2389 bool Block::succ_fall_through(uint i) {
2390 int eidx = end_idx();
2391 Node *n = get_node(eidx); // Get ending Node
2392
2393 int op = n->Opcode();
2394 if (n->is_Mach()) {
2395 if (n->is_MachNullCheck()) {
2396 // In theory either side can fall through; for simplicity's sake,
2397 // assume only the false branch can for now.
2398 return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
2399 }
2400 op = n->as_Mach()->ideal_Opcode();
2401 }
2402
2403 // Switch on branch type
2404 switch( op ) {
2405 case Op_CountedLoopEnd:
2406 case Op_If:
2407 case Op_Root:
2408 case Op_Goto:
2409 return true;
2410
2411 case Op_Catch: {
2412 const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
2413 return ci->_con == CatchProjNode::fall_through_index;
2414 }
2415
2416 case Op_Jump:
2417 case Op_NeverBranch:
2418 case Op_TailCall:
2419 case Op_TailJump:
2420 case Op_ForwardException:
2421 case Op_Return:
2422 case Op_Halt:
2423 case Op_Rethrow:
2424 return false;
2425
2426 default:
2427 ShouldNotReachHere();
2428 }
2429
2430 return false;
2431 }
2432
2433 //------------------------------update_uncommon_branch------------------------
2434 // Update the probability of a two-branch to be uncommon
2435 void Block::update_uncommon_branch(Block* ub) {
2436 int eidx = end_idx();
2437 Node *n = get_node(eidx); // Get ending Node
2438
2439 int op = n->as_Mach()->ideal_Opcode();
2440
2441 assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
2442 assert(num_fall_throughs() == 2, "must be a two way branch block");
2443
2444 // Which successor is ub?
2445 uint s;
2446 for (s = 0; s < _num_succs; s++) {
2447 if (_succs[s] == ub) break;
2448 }
2449 assert(s < 2, "uncommon successor must be found");
2450
2451 // If ub is the true path, make the probability small; else
2452 // ub is the false path, so make the probability large
2453 bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);
2454
2455 // Get existing probability
2456 float p = n->as_MachIf()->_prob;
2457
2458 if (invert) p = 1.0 - p;
2459 if (p > PROB_MIN) {
2460 p = PROB_MIN;
2461 }
2462 if (invert) p = 1.0 - p;
2463
2464 n->as_MachIf()->_prob = p;
2465 }
2466
2467 //------------------------------update_succ_freq-------------------------------
2468 // Update the appropriate frequency associated with block 'b', a successor of
2469 // a block in this loop.
2470 void CFGLoop::update_succ_freq(Block* b, double freq) {
2471 if (b->_loop == this) {
2472 if (b == head()) {
2473 // back branch within the loop
2474 // Do nothing now; the loop-carried frequency will be
2475 // adjusted later in scale_freq().
2476 } else {
2477 // simple branch within the loop
2478 b->_freq += freq;
2479 }
2480 } else if (!in_loop_nest(b)) {
2481 // branch is exit from this loop
2482 BlockProbPair bpp(b, freq);
2483 _exits.append(bpp);
2484 } else {
2485 // branch into nested loop
2486 CFGLoop* ch = b->_loop;
2487 ch->_freq += freq;
2488 }
2489 }
2490
2491 //------------------------------in_loop_nest-----------------------------------
2492 // Determine if block b is in the receiver's loop nest.
2493 bool CFGLoop::in_loop_nest(Block* b) {
2494 int depth = _depth;
2495 CFGLoop* b_loop = b->_loop;
2496 int b_depth = b_loop->_depth;
2497 if (depth == b_depth) {
2498 return true;
2499 }
2500 while (b_depth > depth) {
2501 b_loop = b_loop->_parent;
2502 b_depth = b_loop->_depth;
2503 }
2504 return b_loop == this;
2505 }
2506
2507 //------------------------------scale_freq-------------------------------------
2508 // Scale the frequency of loops and blocks by trip counts from outer loops.
2509 // Do a top-down traversal of the loop tree (visit outer loops first).
2510 void CFGLoop::scale_freq() {
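// Each entry into this loop runs the body about trip_count() times, so scale the
// loop's per-entry frequency by its trip count before propagating it to its members.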
2511 double loop_freq = _freq * trip_count();
2512 _freq = loop_freq;
2513 for (int i = 0; i < _members.length(); i++) {
2514 CFGElement* s = _members.at(i);
2515 double block_freq = s->_freq * loop_freq;
2516 if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
2517 block_freq = MIN_BLOCK_FREQUENCY;
2518 s->_freq = block_freq;
2519 }
2520 CFGLoop* ch = _child;
2521 while (ch != nullptr) {
2522 ch->scale_freq();
2523 ch = ch->_sibling;
2524 }
2525 }
2526
2527 // Frequency of outer loop
2528 double CFGLoop::outer_loop_freq() const {
2529 if (_child != nullptr) {
2530 return _child->_freq;
2531 }
2532 return _freq;
2533 }
2534
2535 #ifndef PRODUCT
2536 //------------------------------dump_tree--------------------------------------
2537 void CFGLoop::dump_tree() const {
2538 dump();
2539 if (_child != nullptr) _child->dump_tree();
2540 if (_sibling != nullptr) _sibling->dump_tree();
2541 }
2542
2543 //------------------------------dump-------------------------------------------
2544 void CFGLoop::dump() const {
2545 for (int i = 0; i < _depth; i++) tty->print(" ");
2546 tty->print("%s: %d trip_count: %6.0f freq: %6.0f\n",
2547 _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
2548 for (int i = 0; i < _depth; i++) tty->print(" ");
2549 tty->print(" members:");
2550 int k = 0;
2551 for (int i = 0; i < _members.length(); i++) {
2552 if (k++ >= 6) {
2553 tty->print("\n ");
2554 for (int j = 0; j < _depth+1; j++) tty->print(" ");
2555 k = 0;
2556 }
2557 CFGElement *s = _members.at(i);
2558 if (s->is_block()) {
2559 Block *b = s->as_Block();
2560 tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
2561 } else {
2562 CFGLoop* lp = s->as_CFGLoop();
2563 tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
2564 }
2565 }
2566 tty->print("\n");
2567 for (int i = 0; i < _depth; i++) tty->print(" ");
2568 tty->print(" exits: ");
2569 k = 0;
2570 for (int i = 0; i < _exits.length(); i++) {
2571 if (k++ >= 7) {
2572 tty->print("\n ");
2573 for (int j = 0; j < _depth+1; j++) tty->print(" ");
2574 k = 0;
2575 }
2576 Block *blk = _exits.at(i).get_target();
2577 double prob = _exits.at(i).get_prob();
2578 tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
2579 }
2580 tty->print("\n");
2581 }
2582 #endif