/*
 * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_C2_BARRIERSETC2_HPP
#define SHARE_GC_SHARED_C2_BARRIERSETC2_HPP

#include "memory/allocation.hpp"
#include "oops/accessDecorators.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "utilities/globalDefinitions.hpp"

// This denotes that the access is mismatched: the type of the value of the
// access does not match the type pointed to by the address.
const DecoratorSet C2_MISMATCHED             = DECORATOR_LAST << 1;
// The access may not be aligned to its natural size.
const DecoratorSet C2_UNALIGNED              = DECORATOR_LAST << 2;
// The atomic cmpxchg is weak, meaning that spurious false negatives are allowed,
// but never false positives.
const DecoratorSet C2_WEAK_CMPXCHG           = DECORATOR_LAST << 3;
// This denotes that a load has a control dependency.
const DecoratorSet C2_CONTROL_DEPENDENT_LOAD = DECORATOR_LAST << 4;
// This denotes a load that must be pinned, but may float above safepoints.
const DecoratorSet C2_UNKNOWN_CONTROL_LOAD   = DECORATOR_LAST << 5;
// This denotes that the access is produced by the sun.misc.Unsafe intrinsics.
const DecoratorSet C2_UNSAFE_ACCESS          = DECORATOR_LAST << 6;
// This denotes that the access mutates state.
const DecoratorSet C2_WRITE_ACCESS           = DECORATOR_LAST << 7;
// This denotes that the access reads state.
const DecoratorSet C2_READ_ACCESS            = DECORATOR_LAST << 8;
// This denotes that the accessed object is tightly coupled with a nearby
// allocation, i.e. it was freshly allocated just before the access.
const DecoratorSet C2_TIGHTLY_COUPLED_ALLOC  = DECORATOR_LAST << 9;
// Loads and stores from an arraycopy being optimized
const DecoratorSet C2_ARRAY_COPY             = DECORATOR_LAST << 10;
// Loads from immutable memory
const DecoratorSet C2_IMMUTABLE_MEMORY       = DECORATOR_LAST << 11;
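
// As a hedged illustration (not part of this header's API), these C2-specific
// decorators are OR'ed together with the shared decorators from
// oops/accessDecorators.hpp when an access is requested. For example, a
// parse-time oop load that must not float above its controlling test might
// combine:
//
//   DecoratorSet decorators = IN_HEAP | MO_UNORDERED | C2_CONTROL_DEPENDENT_LOAD;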

class Compile;
class ConnectionGraph;
class GraphKit;
class IdealKit;
class Node;
class PhaseGVN;
class PhaseIdealLoop;
class PhaseMacroExpand;
class Type;
class TypePtr;
class Unique_Node_List;

// This class wraps a node and a type.
class C2AccessValue: public StackObj {
protected:
  Node* _node;
  const Type* _type;

public:
  C2AccessValue(Node* node, const Type* type) :
    _node(node),
    _type(type) {}

  Node* node() const        { return _node; }
  const Type* type() const  { return _type; }

  void set_node(Node* node) { _node = node; }
};

// This class wraps a node and a pointer type.
class C2AccessValuePtr: public C2AccessValue {

public:
  C2AccessValuePtr(Node* node, const TypePtr* type) :
    C2AccessValue(node, reinterpret_cast<const Type*>(type)) {}

  const TypePtr* type() const { return reinterpret_cast<const TypePtr*>(_type); }
};

// This class wraps a bunch of context parameters that are passed around in the
// BarrierSetC2 backend hierarchy, for loads and stores, to reduce boilerplate.
class C2Access: public StackObj {
protected:
  DecoratorSet      _decorators;
  Node*             _base;
  C2AccessValuePtr& _addr;
  Node*             _raw_access;
  BasicType         _type;
  uint8_t           _barrier_data;

  void fixup_decorators();

public:
  C2Access(DecoratorSet decorators,
           BasicType type, Node* base, C2AccessValuePtr& addr) :
    _decorators(decorators),
    _base(base),
    _addr(addr),
    _raw_access(nullptr),
    _type(type),
    _barrier_data(0)
  {}

  DecoratorSet decorators() const { return _decorators; }
  Node* base() const              { return _base; }
  C2AccessValuePtr& addr() const  { return _addr; }
  BasicType type() const          { return _type; }
  bool is_oop() const             { return is_reference_type(_type); }
  bool is_raw() const             { return (_decorators & AS_RAW) != 0; }
  Node* raw_access() const        { return _raw_access; }

  uint8_t barrier_data() const        { return _barrier_data; }
  void set_barrier_data(uint8_t data) { _barrier_data = data; }

  void set_raw_access(Node* raw_access) { _raw_access = raw_access; }
  virtual void set_memory() {} // no-op for normal accesses, but not for atomic accesses.

  MemNode::MemOrd mem_node_mo() const;
  bool needs_cpu_membar() const;

  virtual PhaseGVN& gvn() const = 0;
  virtual bool is_parse_access() const { return false; }
  virtual bool is_opt_access() const { return false; }
};

// C2Access for parse time calls to the BarrierSetC2 backend.
class C2ParseAccess: public C2Access {
protected:
  GraphKit*         _kit;
  Node*             _ctl;

  void* barrier_set_state() const;

public:
  C2ParseAccess(GraphKit* kit, DecoratorSet decorators,
                BasicType type, Node* base, C2AccessValuePtr& addr,
                Node* ctl = nullptr) :
    C2Access(decorators, type, base, addr),
    _kit(kit),
    _ctl(ctl) {
    fixup_decorators();
  }

  GraphKit* kit() const           { return _kit; }
  Node* control() const;

  virtual PhaseGVN& gvn() const;
  virtual bool is_parse_access() const { return true; }
};
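
// A minimal usage sketch (hypothetical; loosely following how GraphKit drives
// these accesses, with "kit", "obj", "adr", "adr_type" and "val_type" assumed
// to be set up by the caller):
//
//   C2AccessValuePtr addr(adr, adr_type);
//   C2ParseAccess access(kit, IN_HEAP | MO_UNORDERED, T_OBJECT, obj, addr);
//   Node* result = BarrierSet::barrier_set()->barrier_set_c2()->load_at(access, val_type);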

// This class wraps a bunch of context parameters that are passed around in the
// BarrierSetC2 backend hierarchy, for atomic accesses, to reduce boilerplate.
class C2AtomicParseAccess: public C2ParseAccess {
  Node* _memory;
  uint  _alias_idx;

public:
  C2AtomicParseAccess(GraphKit* kit, DecoratorSet decorators, BasicType type,
                      Node* base, C2AccessValuePtr& addr, uint alias_idx) :
    C2ParseAccess(kit, decorators, type, base, addr),
    _memory(nullptr),
    _alias_idx(alias_idx) {}

  // Set the memory node based on the current memory slice.
  virtual void set_memory();

  Node* memory() const       { return _memory; }
  uint alias_idx() const     { return _alias_idx; }
};
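
// A hedged sketch of an atomic access: the alias index selects the memory
// slice, and the backend calls set_memory() before creating the raw atomic
// node. All names below ("kit", "obj", "adr", "adr_type", "alias_idx",
// expected/new values) are placeholders:
//
//   C2AccessValuePtr addr(adr, adr_type);
//   C2AtomicParseAccess access(kit, IN_HEAP | MO_SEQ_CST, T_OBJECT, obj, addr, alias_idx);
//   Node* prev = BarrierSet::barrier_set()->barrier_set_c2()->
//       atomic_cmpxchg_val_at(access, expected_val, new_val, val_type);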

// C2Access for optimization time calls to the BarrierSetC2 backend.
class C2OptAccess: public C2Access {
  PhaseGVN& _gvn;
  MergeMemNode* _mem;
  Node* _ctl;

public:
  C2OptAccess(PhaseGVN& gvn, Node* ctl, MergeMemNode* mem, DecoratorSet decorators,
              BasicType type, Node* base, C2AccessValuePtr& addr) :
    C2Access(decorators, type, base, addr),
    _gvn(gvn), _mem(mem), _ctl(ctl) {
    fixup_decorators();
  }

  MergeMemNode* mem() const { return _mem; }
  Node* ctl() const { return _ctl; }

  virtual PhaseGVN& gvn() const { return _gvn; }
  virtual bool is_opt_access() const { return true; }
};

class BarrierSetC2State : public ArenaObj {
protected:
  Node_Array                      _live;

public:
  BarrierSetC2State(Arena* arena) : _live(arena) {}

  RegMask* live(const Node* node) {
    if (!node->is_Mach() || !needs_liveness_data(node->as_Mach())) {
      // Don't need liveness for non-MachNodes or if the GC doesn't request it
      return nullptr;
    }
    RegMask* live = (RegMask*)_live[node->_idx];
    if (live == nullptr) {
      live = new (Compile::current()->comp_arena()->AmallocWords(sizeof(RegMask))) RegMask();
      _live.map(node->_idx, (Node*)live);
    }

    return live;
  }

  virtual bool needs_liveness_data(const MachNode* mach) const = 0;
  virtual bool needs_livein_data() const = 0;
};
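
// A GC-specific backend would typically subclass this state. A minimal
// hypothetical sketch (the class name and liveness policy are illustrative
// assumptions, not an existing implementation):
//
//   class XYZBarrierSetC2State : public BarrierSetC2State {
//   public:
//     XYZBarrierSetC2State(Arena* arena) : BarrierSetC2State(arena) {}
//     // Only collect liveness for nodes that actually carry a barrier.
//     virtual bool needs_liveness_data(const MachNode* mach) const {
//       return mach->barrier_data() != 0;
//     }
//     virtual bool needs_livein_data() const { return false; }
//   };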

// This class represents the slow path in a C2 barrier. It is defined by a
// memory access, an entry point, and a continuation point (typically the end of
// the barrier). It provides a set of registers whose value is live across the
// barrier, and hence must be preserved across runtime calls from the stub.
class BarrierStubC2 : public ArenaObj {
protected:
  const MachNode* _node;
  Label           _entry;
  Label           _continuation;
  RegMask         _preserve;

  // Registers that are live-in/live-out of the entire memory access
  // implementation (possibly including multiple barriers). Whether live-in or
  // live-out registers are returned depends on
  // BarrierSetC2State::needs_livein_data().
  RegMask& live() const;

public:
  BarrierStubC2(const MachNode* node);

  // Entry point to the stub.
  Label* entry();
  // Return point from the stub (typically end of barrier).
  Label* continuation();
  // High-level, GC-specific barrier flags.
  uint8_t barrier_data() const;

  // Preserve the value in reg across runtime calls in this barrier.
  void preserve(Register reg);
  // Do not preserve the value in reg across runtime calls in this barrier.
  void dont_preserve(Register reg);
  // Set of registers whose value needs to be preserved across runtime calls in this barrier.
  const RegMask& preserve_set() const;
};
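
// A hedged usage sketch: a GC-specific stub subclass (here the hypothetical
// XYZLoadBarrierStubC2) records its operands, requests preservation of the
// registers it needs across the runtime call, and later emits code between
// entry() and continuation() (assuming the usual "__ masm->" convention):
//
//   stub->preserve(result_reg);      // result must survive the runtime call
//   ...
//   __ bind(*stub->entry());
//   // save stub->preserve_set(), call into the runtime, restore registers
//   __ jmp(*stub->continuation());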

// This is the top-level class for the backend of the Access API in C2.
// The top-level class is responsible for performing raw accesses. The
// various GC barrier sets inherit from the BarrierSetC2 class to sprinkle
// barriers into the accesses.
class BarrierSetC2: public CHeapObj<mtGC> {
protected:
  virtual void resolve_address(C2Access& access) const;
  virtual Node* store_at_resolved(C2Access& access, C2AccessValue& val) const;
  virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;

  virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                               Node* new_val, const Type* val_type) const;
  virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                Node* new_val, const Type* value_type) const;
  virtual Node* atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const;
  virtual Node* atomic_add_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const;
  void pin_atomic_op(C2AtomicParseAccess& access) const;
  void clone_in_runtime(PhaseMacroExpand* phase, ArrayCopyNode* ac,
                        address call_addr, const char* call_name) const;

public:
  // This is the entry-point for the backend to perform accesses through the Access API.
  virtual Node* store_at(C2Access& access, C2AccessValue& val) const;
  virtual Node* load_at(C2Access& access, const Type* val_type) const;

  virtual Node* atomic_cmpxchg_val_at(C2AtomicParseAccess& access, Node* expected_val,
                                      Node* new_val, const Type* val_type) const;
  virtual Node* atomic_cmpxchg_bool_at(C2AtomicParseAccess& access, Node* expected_val,
                                       Node* new_val, const Type* val_type) const;
  virtual Node* atomic_xchg_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const;
  virtual Node* atomic_add_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const;

  virtual void clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* size, bool is_array) const;

  virtual Node* obj_allocate(PhaseMacroExpand* macro, Node* mem, Node* toobig_false, Node* size_in_bytes,
                             Node*& i_o, Node*& needgc_ctrl,
                             Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
                             intx prefetch_lines) const;

  virtual Node* ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const { return nullptr; }

  // These are general helper methods used by C2
  enum ArrayCopyPhase {
    Parsing,
    Optimization,
    Expansion
  };

  virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, bool is_clone_instance, ArrayCopyPhase phase) const { return false; }
  virtual void clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const;

  // Support for GC barriers emitted during parsing
  virtual bool has_load_barrier_nodes() const { return false; }
  virtual bool is_gc_pre_barrier_node(Node* node) const { return false; }
  virtual bool is_gc_barrier_node(Node* node) const { return false; }
  virtual Node* step_over_gc_barrier(Node* c) const { return c; }

  // Support for macro expanded GC barriers
  virtual void register_potential_barrier_node(Node* node) const { }
  virtual void unregister_potential_barrier_node(Node* node) const { }
  virtual void eliminate_gc_barrier(PhaseIterGVN* igvn, Node* node) const { }
  virtual void eliminate_gc_barrier_data(Node* node) const { }
  virtual void enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const {}
  virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const {}

  // Allow barrier sets to have shared state that is preserved across a compilation unit.
  // This could for example comprise macro nodes to be expanded during macro expansion.
  virtual void* create_barrier_state(Arena* comp_arena) const { return nullptr; }
  // If the BarrierSetC2 compilation unit state contains barrier nodes to be
  // expanded later, then now is the time to do so.
  virtual bool expand_barriers(Compile* C, PhaseIterGVN& igvn) const { return false; }
  virtual bool optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const { return false; }
  virtual bool strip_mined_loops_expanded(LoopOptsMode mode) const { return false; }
  virtual bool is_gc_specific_loop_opts_pass(LoopOptsMode mode) const { return false; }
  // Estimated size of the node barrier in number of C2 Ideal nodes.
  // This is used to guide heuristics in C2, e.g. whether to unroll a loop.
  virtual uint estimated_barrier_size(const Node* node) const { return 0; }
  // Whether the given store can be used to initialize a newly allocated object.
  virtual bool can_initialize_object(const StoreNode* store) const { return true; }

  enum CompilePhase {
    BeforeOptimize,
    BeforeMacroExpand,
    BeforeCodeGen
  };

#ifdef ASSERT
  virtual void verify_gc_barriers(Compile* compile, CompilePhase phase) const {}
#endif

  virtual bool final_graph_reshaping(Compile* compile, Node* n, uint opcode, Unique_Node_List& dead_nodes) const { return false; }

  virtual bool escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const { return false; }
  virtual bool escape_add_final_edges(ConnectionGraph* conn_graph, PhaseGVN* gvn, Node* n, uint opcode) const { return false; }
  virtual bool escape_has_out_with_unsafe_object(Node* n) const { return false; }

  virtual bool matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const { return false; }
  virtual bool matcher_is_store_load_barrier(Node* x, uint xop) const { return false; }

  virtual void late_barrier_analysis() const { }
  virtual void compute_liveness_at_stubs() const;
  virtual int estimate_stub_size() const { return 0; }
  virtual void emit_stubs(CodeBuffer& cb) const { }

  static int arraycopy_payload_base_offset(bool is_array);

#ifndef PRODUCT
  virtual void dump_barrier_data(const MachNode* mach, outputStream* st) const {
    st->print("%x", mach->barrier_data());
  }
#endif
};
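
// A minimal sketch of how a GC hooks into this class; purely illustrative
// (the XYZ name and barrier policy are assumptions, not an existing GC):
//
//   class XYZBarrierSetC2 : public BarrierSetC2 {
//   protected:
//     virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const {
//       if (access.is_oop()) {
//         access.set_barrier_data(1); // tag the raw load for a later barrier pass
//       }
//       return BarrierSetC2::load_at_resolved(access, val_type);
//     }
//   };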

#endif // SHARE_GC_SHARED_C2_BARRIERSETC2_HPP