1 /*
  2  * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "opto/c2_MacroAssembler.hpp"
 26 #include "opto/callnode.hpp"
 27 #include "opto/compile.hpp"
 28 #include "opto/loopnode.hpp"
 29 #include "opto/phaseX.hpp"
 30 #include "opto/reachability.hpp"
 31 #include "opto/regalloc.hpp"
 32 #include "opto/runtime.hpp"
 33 #include "utilities/pair.hpp"
 34 
 35 /*
 36  * java.lang.ref.Reference::reachabilityFence support.
 37  *
 38  * Reachability Fence (RF) ensures that the given object (referent) remains strongly reachable
 39  * regardless of any optimizing transformations the virtual machine may perform that might otherwise
 40  * allow the object to become unreachable.
 41  *
 42  * RFs are intended to be used in performance-critical code, so the primary goal for C2 support is
 43  * to reduce their runtime overhead as much as possible.
 44  *
 45  * Reference::reachabilityFence() calls are intrinsified into ReachabilityFence CFG nodes. RF node keeps
 46  * its referent alive, so the referent's location is recorded at every safepoint (in its oop map) which
 47  * interferes with referent's live range.
 48  *
 49  * It is tempting to directly attach referents to interfering safepoints right from the beginning, but it
 50  * doesn't play well with some optimizations C2 does (e.g., during loop-invariant code motion a safepoint
 51  * can become interfering once a load is hoisted).
 52  *
 53  * Instead, reachability representation transitions through multiple phases:
 54  *   (0) initial set of RFs is materialized during parsing (as a result of
 55  *       Reference.reachabilityFence intrinsification);
 56  *   (1) optimization pass during loop opts eliminates redundant RF nodes and
 57  *       moves the ones with loop-invariant referents outside (after) loops;
 58  *   (2) after loop opts are over, RF nodes are eliminated and their referents are transferred to
 59  *       safepoint nodes (appended as edges after debug info);
 60  *   (3) during final graph reshaping, referent edges are removed from safepoints and materialized as RF nodes
 61  *       attached to their safepoint node (closely following it in CFG graph).
 62  *
 63  * Some implementation considerations.
 64  *
 65  * (a) It looks attractive to get rid of RF nodes early and transfer to safepoint-attached representation,
 66  * but it is not correct until loop opts are done.
 67  *
 68  * Live ranges of values are routinely extended during loop opts. And it can break the invariant that
 69  * all interfering safepoints contain the referent in their oop map. (If an interfering safepoint doesn't
 70  * keep the referent alive, then it becomes possible for the referent to be prematurely GCed.)
 71  *
 72  * compiler/c2/TestReachabilityFence.java demonstrates a situation where a load is hoisted out of a loop thus
 73  * extending the live range of the value it produces beyond the safepoint on loop-back edge.
 74  *
 75  * After loop opts are over, it becomes possible to reliably enumerate all interfering safepoints and
 76  * to ensure that the referent is present in their oop maps. Current assumption is that after loop opts the IR graph
 77  * is stable enough, so relative order of memory operations and safepoints is preserved and only safepoints between
 * a referent and its uses are taken into account. A more conservative analysis can be employed -- any safepoint dominated
 79  * by a referent is treated as interfering with it -- if it turns out that the assumption doesn't hold.
 80  *
 81  * (b) RF nodes may interfere with Register Allocator (RA). If a safepoint is pruned during macro expansion,
 82  * it can make some RF nodes redundant, but we don't have information about their relations anymore to detect that.
 83  * Redundant RF node unnecessarily extends referent's live range and increases register pressure.
 84  *
 85  * Hence, we eliminate RF nodes and transfer their referents to corresponding safepoints (phase #2).
 86  * When safepoints are pruned, corresponding reachability edges also go away.
 87  *
 88  * (c) Unfortunately, it's not straightforward to stay with safepoint-attached representation till the very end,
 89  * because information about derived oops is attached to safepoints in a similar way. So, for now RFs are
 90  * rematerialized at safepoints before RA (phase #3).
 91  */
 92 
 93 bool ReachabilityFenceNode::is_redundant(PhaseGVN& gvn) {
 94   const Type* referent_t = gvn.type(referent());
 95   if (referent_t == TypePtr::NULL_PTR) {
 96     return true; // no-op fence: null referent
 97   }
 98   if (!OptimizeReachabilityFences) {
 99     return false; // keep reachability fence nodes intact
100   }
101   if (!PreserveReachabilityFencesOnConstants && referent_t->singleton()) {
102     return true; // no-op fence: constants are strongly reachable
103   }
104   return false;
105 }
106 
107 Node* ReachabilityFenceNode::Ideal(PhaseGVN* phase, bool can_reshape) {
108   return (remove_dead_region(phase, can_reshape) ? this : nullptr);
109 }
110 
111 Node* ReachabilityFenceNode::Identity(PhaseGVN* phase) {
112   if (is_redundant(*phase)) {
113     return in(0);
114   }
115   return this;
116 }
117 
118 // Turn the RF node into a no-op by setting its referent to null.
119 // Subsequent IGVN pass removes cleared nodes.
120 bool ReachabilityFenceNode::clear_referent(PhaseIterGVN& phase) {
121   if (phase.type(referent()) == TypePtr::NULL_PTR) {
122     return false;
123   } else {
124     phase.replace_input_of(this, 1, phase.makecon(TypePtr::NULL_PTR));
125     return true;
126   }
127 }
128 
129 #ifndef PRODUCT
130 static void rf_desc(outputStream* st, const ReachabilityFenceNode* rf, PhaseRegAlloc* ra) {
131   char buf[50];
132   ra->dump_register(rf->referent(), buf, sizeof(buf));
133   st->print("reachability fence [%s]", buf);
134 }
135 
// Debug printing support: delegate to the shared description helper.
void ReachabilityFenceNode::format(PhaseRegAlloc* ra, outputStream* st) const {
  rf_desc(st, this, ra);
}
139 
140 void ReachabilityFenceNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra) const {
141   ResourceMark rm;
142   stringStream ss;
143   rf_desc(&ss, this, ra);
144   const char* desc = masm->code_string(ss.freeze());
145   masm->block_comment(desc);
146 }
147 #endif
148 
149 // Detect safepoint nodes which are important for reachability tracking purposes.
150 // Some SafePoint nodes can't affect referent's reachability in any noticeable way and
151 // can be safely ignored during the analysis.
152 static bool is_interfering_sfpt_candidate(SafePointNode* sfpt) {
153   if (sfpt->jvms() == nullptr) {
154     return false; // not a real safepoint
155   } else if (sfpt->is_CallStaticJava() && sfpt->as_CallStaticJava()->is_uncommon_trap()) {
156     return false; // uncommon traps are exit points
157   }
158   return true; // a full-blown safepoint can interfere with a reachability fence and its referent
159 }
160 
// Splice a new ReachabilityFence node for referent into the CFG right after ctrl,
// between ctrl and its unique control successor. Updates loop membership and
// dominator-tree bookkeeping for the new node and the rewired successor.
void PhaseIdealLoop::insert_rf(Node* ctrl, Node* referent) {
  IdealLoopTree* lpt = get_loop(ctrl);
  Node* ctrl_end = ctrl->unique_ctrl_out();

  auto new_rf = new ReachabilityFenceNode(C, ctrl, referent);

  register_control(new_rf, lpt, ctrl);
  set_idom(new_rf, ctrl, dom_depth(ctrl) + 1);
  lpt->register_reachability_fence(new_rf);

  // Redirect the old successor's control edge to go through the new fence.
  igvn().rehash_node_delayed(ctrl_end);
  ctrl_end->replace_edge(ctrl, new_rf);

  if (idom(ctrl_end) == ctrl) {
    // The fence now sits between ctrl_end and its former idom.
    set_idom(ctrl_end, new_rf, dom_depth(new_rf) + 1);
  } else {
    // Only a merge point may have an idom other than its control predecessor.
    assert(ctrl_end->is_Region(), "");
  }
}
180 
// Replace old_node (an RF node, or a projection hanging off one) with new_node,
// keeping loop-opts bookkeeping (loop body list, per-loop RF list) consistent.
void PhaseIdealLoop::replace_rf(Node* old_node, Node* new_node) {
  assert(old_node->is_ReachabilityFence() ||
         (old_node->is_Proj() && old_node->in(0)->is_ReachabilityFence()),
         "%s", NodeClassNames[old_node->Opcode()]);

  // Drop old_node from the loop data structures before rewriting the graph.
  IdealLoopTree* lpt = get_loop(old_node);
  lpt->_body.yank(old_node);
  assert(lpt->_reachability_fences != nullptr, "missing");
  assert(lpt->_reachability_fences->contains(old_node), "missing");
  lpt->_reachability_fences->yank(old_node);
  replace_node_and_forward_ctrl(old_node, new_node);
}
193 
// Remove an RF node from the graph: first null out its referent edge (so the
// referent can be reclaimed if the fence was its last use), then splice the
// fence out of the control flow.
void PhaseIdealLoop::remove_rf(ReachabilityFenceNode* rf) {
  Node* rf_ctrl_in = rf->in(0);
  Node* referent = rf->referent();
  // NB: short-circuit matters -- outcnt() is only checked after clear_referent()
  // actually dropped the referent edge (it returns false if already null).
  if (rf->clear_referent(igvn()) && referent->outcnt() == 0) {
    remove_dead_data_node(referent);
  }
  replace_rf(rf, rf_ctrl_in);
}
202 
203 //======================================================================
204 //---------------------------- Phase 1 ---------------------------------
205 // Optimization pass over reachability fences during loop opts.
206 // Moves RFs with loop-invariant referents out of the loop.
// Returns true if any RF node was moved or removed.
bool PhaseIdealLoop::optimize_reachability_fences() {
  Compile::TracePhase tp(_t_reachability_optimize);

  assert(OptimizeReachabilityFences, "required");

  // ResourceMark rm; // NB! not safe because insert_rf may trigger _idom reallocation
  Unique_Node_List redundant_rfs;
  typedef Pair<Node*,Node*> LoopExit;
  GrowableArray<LoopExit> worklist;

  // Pass 1: collect candidates. CFG mutation is deferred so the fence list
  // is not modified while it is being iterated.
  for (int i = 0; i < C->reachability_fences_count(); i++) {
    ReachabilityFenceNode* rf = C->reachability_fence(i);
    assert(!rf->is_redundant(igvn()), "required");
    // Move RFs out of counted loops when possible.
    IdealLoopTree* lpt = get_loop(rf);
    Node* referent = rf->referent();
    if (lpt->is_invariant(referent)) {
      IfFalseNode* unique_loop_exit = lpt->unique_loop_exit_proj_or_null();
      if (unique_loop_exit != nullptr) {
        // Skip over an outer strip-mined loop.
        if (!lpt->is_root()) {
          IdealLoopTree* outer_lpt = lpt->_parent;
          if (outer_lpt->head()->is_OuterStripMinedLoop()) {
            if (outer_lpt->is_invariant(referent)) {
              // Place the fence after the outer loop's exit instead.
              IfNode* outer_loop_end = outer_lpt->head()->as_OuterStripMinedLoop()->outer_loop_end();
              if (outer_loop_end != nullptr && outer_loop_end->false_proj_or_null() != nullptr) {
                unique_loop_exit = outer_loop_end->false_proj_or_null();
              }
            } else {
              // An attempt to insert an RF node inside outer strip-mined loop breaks
              // its IR invariants and manifests as assertion failures.
              assert(false, "not loop invariant in outer strip-mined loop");
              continue; // skip
            }
          }
        }

        LoopExit p(referent, unique_loop_exit);
        worklist.push(p);
        redundant_rfs.push(rf);

#ifndef PRODUCT
        if (TraceLoopOpts) {
          IdealLoopTree* loop = get_loop(unique_loop_exit->in(0));
          tty->print_cr("ReachabilityFence: N%d: %s N%d/N%d -> loop exit N%d (%s N%d/N%d)",
                        rf->_idx, lpt->head()->Name(), lpt->head()->_idx, lpt->tail()->_idx,
                        unique_loop_exit->_idx,
                        loop->head()->Name(), loop->head()->_idx, loop->tail()->_idx);
        }
#endif // !PRODUCT
      }
    }
  }

  // Pass 2: populate RFs outside loops.
  while (worklist.is_nonempty()) {
    LoopExit p = worklist.pop();
    Node* referent = p.first;
    Node* ctrl_out = p.second;
    insert_rf(ctrl_out, referent);
  }

  // Pass 3: eliminate redundant RFs (now superseded by the fences at loop exits).
  bool progress = (redundant_rfs.size() > 0);
  while (redundant_rfs.size() > 0) {
    remove_rf(redundant_rfs.pop()->as_ReachabilityFence());
  }

  return progress;
}
277 
278 //======================================================================
279 //---------------------------- Phase 2 ---------------------------------
280 
281 // Linearly traverse CFG upwards starting at ctrl_start until first merge point.
282 // All encountered safepoints are recorded in safepoints list, except
283 // the ones filtered out by is_interfering_sfpt_candidate().
static void enumerate_interfering_sfpts_linear_traversal(Node* ctrl_start, Node_Stack& stack, VectorSet& visited,
                                                         Node_List& interfering_sfpts) {
  // Walk straight up the control chain via in(0).
  for (Node* ctrl = ctrl_start; ctrl != nullptr; ctrl = ctrl->in(0)) {
    assert(ctrl->is_CFG(), "");
    if (visited.test_set(ctrl->_idx)) {
      return; // already processed (or the traversal's designated end point)
    } else {
      if (ctrl->is_Region()) {
        // Defer merge points: the caller continues the walk over each
        // of the region's control inputs via the stack.
        stack.push(ctrl, 1);
        return; // stop at merge points
      } else if (ctrl->is_SafePoint() && is_interfering_sfpt_candidate(ctrl->as_SafePoint())) {
        assert(!ctrl->is_CallStaticJava() || !ctrl->as_CallStaticJava()->is_uncommon_trap(),
               "uncommon traps should not be enumerated");
        interfering_sfpts.push(ctrl);
      }
    }
  }
}
302 
// Enumerate all safepoints which are reachable from the RF to its referent through CFG.
// Start at RF node and traverse CFG upwards until referent's control node is reached.
// The walk is iterative: linear control chains are handled by the helper above,
// merge points (Regions) are expanded one input at a time via the explicit stack.
static void enumerate_interfering_sfpts(ReachabilityFenceNode* rf, PhaseIdealLoop* phase,
                                        Node_Stack& stack, VectorSet& visited,
                                        Node_List& interfering_sfpts) {
  assert(stack.is_empty(), "required");
  assert(visited.is_empty(), "required");

  Node* referent = rf->referent();
  Node* referent_ctrl = phase->get_ctrl(referent);
  assert(phase->is_dominator(referent_ctrl, rf), "sanity");

  // Marking the referent's control as visited terminates the upward walk there.
  visited.set(referent_ctrl->_idx); // end point
  enumerate_interfering_sfpts_linear_traversal(rf, stack, visited, interfering_sfpts); // starting point in CFG
  while (stack.is_nonempty()) {
    Node* cur = stack.node();
    uint  idx = stack.index();

    assert(cur != nullptr, "");
    assert(cur->is_Region(), "%s", NodeClassNames[cur->Opcode()]);
    assert(phase->is_dominator(referent_ctrl, cur), "");
    assert(idx > 0 && idx <= cur->req(), "%d %d", idx, cur->req());

    if (idx < cur->req()) {
      // Visit the next control input of the region; the helper may push
      // further regions onto the stack.
      stack.set_index(idx + 1);
      enumerate_interfering_sfpts_linear_traversal(cur->in(idx), stack, visited, interfering_sfpts);
    } else {
      stack.pop(); // all inputs of this region processed
    }
  }
  // Reset temporary structures to their initial state.
  assert(stack.is_empty(), "required");
  visited.clear();
}
337 
// Start offset for reachability info on a safepoint node.
// Reachability (referent) edges are appended after debug info, so any input
// at index >= debug_end() is treated as a reachability edge.
static uint rf_base_offset(SafePointNode* sfpt) {
  return sfpt->jvms()->debug_end();
}
342 
343 static bool dominates_another_rf(ReachabilityFenceNode* rf, PhaseIdealLoop* phase) {
344   assert(!rf->is_redundant(phase->igvn()), "");
345 
346   for (int i = 0; i < phase->C->reachability_fences_count(); i++) {
347     ReachabilityFenceNode* other_rf = phase->C->reachability_fence(i);
348     assert(other_rf->outcnt() > 0, "dead node");
349     if (rf != other_rf && rf->referent()->eqv_uncast(other_rf->referent()) &&
350         phase->is_dominator(rf, other_rf)) {
351       return true; // dominates another reachability fence with the same referent
352     }
353   }
354   return false;
355 }
356 
// Phase 2: migrate reachability info to safepoints.
// All RFs are replaced with edges from corresponding referents to interfering safepoints.
// Interfering safepoints are safepoint nodes which are reachable from the RF to its referent through CFG.
// Returns true if any RF node was expanded/removed.
bool PhaseIdealLoop::expand_reachability_fences() {
  Compile::TracePhase tp(_t_reachability_expand);

  assert(OptimizeReachabilityFences, "required");
  assert(C->post_loop_opts_phase(), "required");
  DEBUG_ONLY( int no_of_constant_rfs = 0; )

  ResourceMark rm;
  Unique_Node_List for_removal;
  typedef Pair<SafePointNode*,Node*> ReachabilityEdge;
  GrowableArray<ReachabilityEdge> reachability_edges;
  {
    // Reuse temporary structures to avoid allocating them for every single RF node.
    Node_List sfpt_worklist;
    Node_Stack stack(0);
    VectorSet visited;

    for (int i = 0; i < C->reachability_fences_count(); i++) {
      ReachabilityFenceNode* rf = C->reachability_fence(i);
      assert(!rf->is_redundant(igvn()), "missed");
      if (PreserveReachabilityFencesOnConstants) {
        const Type* referent_t = igvn().type(rf->referent());
        assert(referent_t != TypePtr::NULL_PTR, "redundant rf");
        bool is_constant_rf = referent_t->singleton();
        if (is_constant_rf) {
          DEBUG_ONLY( no_of_constant_rfs += 1; )
          continue; // leave RFs on constants intact
        }
      }
      if (dominates_another_rf(rf, this)) {
        // Redundant fence: dominates another RF with the same referent.
        // RF is redundant for some referent oop when the referent has another RF which
        // keeps it alive across the RF. In terms of dominance relation it can be formulated
        // as "a referent has another RF which is dominated by the redundant RF".
        //
        // NB! It is safe to assume that dominating RF is redundant only during expansion.
        // Otherwise, there's a chance for the superseding RF node to go away before expansion.
        // Non-RF users are ignored for a similar reason: they can go away before or after expansion
        // takes place, so no guarantees reachability information is preserved.
      } else {
        assert(sfpt_worklist.size() == 0, "not empty");
        enumerate_interfering_sfpts(rf, this, stack, visited, sfpt_worklist);

        // Record a (safepoint, referent) edge for every interfering safepoint
        // not already referencing the referent.
        Node* referent = rf->referent();
        while (sfpt_worklist.size() > 0) {
          SafePointNode* sfpt = sfpt_worklist.pop()->as_SafePoint();
          assert(is_dominator(get_ctrl(referent), sfpt), "");
          assert(sfpt->req() == rf_base_offset(sfpt), "no extra edges allowed");
          if (sfpt->find_edge(referent) == -1) {
            ReachabilityEdge p(sfpt, referent);
            reachability_edges.push(p);
          }
        }
      }
      for_removal.push(rf);
    }
  }
  // Materialize reachability edges.
  while (reachability_edges.length() > 0) {
    ReachabilityEdge p = reachability_edges.pop();
    SafePointNode* sfpt = p.first;
    Node* referent = p.second;
    // Re-check: multiple RFs with the same referent may have queued duplicates.
    if (sfpt->find_edge(referent) == -1) {
      sfpt->add_req(referent);
      igvn()._worklist.push(sfpt);
    }
  }
  // Eliminate processed RFs. They become redundant once reachability edges are added.
  bool progress = (for_removal.size() > 0);
  while (for_removal.size() > 0) {
    remove_rf(for_removal.pop()->as_ReachabilityFence());
  }

  // Only RFs on constants (when preserved) may survive this phase.
  assert(C->reachability_fences_count() == no_of_constant_rfs, "");
  return progress;
}
436 
437 //======================================================================
438 //---------------------------- Phase 3 ---------------------------------
439 
// Find a point in CFG right after safepoint node to insert reachability fence.
// For non-call safepoints that is the safepoint itself; for calls, one of the
// call's control projections is chosen (in order of preference below).
static Node* sfpt_ctrl_out(SafePointNode* sfpt) {
  if (sfpt->is_Call()) {
    CallProjections callprojs;
    sfpt->as_Call()->extract_projections(&callprojs,
                                         false /*separate_io_proj*/,
                                         false /*do_asserts*/,
                                         true /*allow_handlers*/);
    // Calls can have multiple control projections. However, reachability edge expansion
    // happens during final graph reshaping which is performed very late in compilation pipeline.
    // The assumption here is that the control path chosen for insertion can't go away.
    // So, materializing a reachability fence on a single control path produced by a call
    // is enough to keep the referent oop alive across the call.
    if (callprojs.fallthrough_catchproj != nullptr) {
      return callprojs.fallthrough_catchproj;
    } else if (callprojs.catchall_catchproj != nullptr) {
      return callprojs.catchall_catchproj; // rethrow stub
    } else if (callprojs.fallthrough_proj != nullptr) {
      return callprojs.fallthrough_proj; // no exceptions thrown
    } else {
      ShouldNotReachHere();
    }
  } else {
    return sfpt;
  }
}
466 
// Phase 3: materialize reachability fences from reachability edges on safepoints.
// Turn extra safepoint edges into reachability fences immediately following the safepoint.
//
// NB! As of now, a special care is needed to properly enumerate reachability edges because
// there are other use cases for non-debug safepoint edges. expand_reachability_edges() runs
// after macro expansion where runtime calls during array allocation are annotated with
// valid_length_test_input as an extra edges. Until there's a mechanism to distinguish between
// different types of non-debug edges, unrelated cases are filtered out explicitly and in ad-hoc manner.
void Compile::expand_reachability_edges(Unique_Node_List& safepoints) {
  for (uint i = 0; i < safepoints.size(); i++) {
    SafePointNode* sfpt = safepoints.at(i)->as_SafePoint();

    // Any input past rf_base_offset is a candidate reachability edge.
    uint rf_offset = rf_base_offset(sfpt);
    if (sfpt->jvms() != nullptr && sfpt->req() > rf_offset) {
      assert(is_interfering_sfpt_candidate(sfpt), "");
      Node* ctrl_out = sfpt_ctrl_out(sfpt);
      Node* ctrl_end = ctrl_out->unique_ctrl_out();

      // Temporarily detach the valid_length_test_input edge (appended last by
      // macro expansion on array-allocation runtime calls) so the loop below
      // doesn't mistake it for a referent.
      Node* extra_edge = nullptr;
      if (sfpt->is_Call()) {
        address entry = sfpt->as_Call()->entry_point();
        if (entry == OptoRuntime::new_array_Java() ||
            entry == OptoRuntime::new_array_nozero_Java()) {
          // valid_length_test_input is appended during macro expansion at the very end
          int last_idx = sfpt->req() - 1;
          extra_edge = sfpt->in(last_idx);
          sfpt->del_req(last_idx);
        }
      }

      // Turn each remaining extra edge into an RF node chained right after
      // the safepoint: ctrl_out -> new_rf -> ... -> old successor.
      while (sfpt->req() > rf_offset) {
        int idx = sfpt->req() - 1;
        Node* referent = sfpt->in(idx);
        sfpt->del_req(idx);

        Node* new_rf = new ReachabilityFenceNode(C, ctrl_out, referent);
        ctrl_end->replace_edge(ctrl_out, new_rf);
        ctrl_end = new_rf;
      }

      if (extra_edge != nullptr) {
        sfpt->add_req(extra_edge); // Add valid_length_test_input edge back
      }
    }
  }
}