130 Node* raw_access() const { return _raw_access; }
131
132 uint8_t barrier_data() const { return _barrier_data; }
133 void set_barrier_data(uint8_t data) { _barrier_data = data; }
134
135 void set_raw_access(Node* raw_access) { _raw_access = raw_access; }
136 virtual void set_memory() {} // no-op for normal accesses, but not for atomic accesses.
137
138 MemNode::MemOrd mem_node_mo() const;
139 bool needs_cpu_membar() const;
140
141 virtual PhaseGVN& gvn() const = 0;
142 virtual bool is_parse_access() const { return false; }
143 virtual bool is_opt_access() const { return false; }
144 };
145
146 // C2Access for parse time calls to the BarrierSetC2 backend.
147 class C2ParseAccess: public C2Access {
148 protected:
149 GraphKit* _kit;
150
151 void* barrier_set_state() const;
152
153 public:
154 C2ParseAccess(GraphKit* kit, DecoratorSet decorators,
155 BasicType type, Node* base, C2AccessValuePtr& addr) :
156 C2Access(decorators, type, base, addr),
157 _kit(kit) {
158 fixup_decorators();
159 }
160
161 GraphKit* kit() const { return _kit; }
162
163 virtual PhaseGVN& gvn() const;
164 virtual bool is_parse_access() const { return true; }
165 };
166
167 // This class wraps a bunch of context parameters that are passed around in the
168 // BarrierSetC2 backend hierarchy, for atomic accesses, to reduce boiler plate.
169 class C2AtomicParseAccess: public C2ParseAccess {
170 Node* _memory;
171 uint _alias_idx;
172
173 public:
174 C2AtomicParseAccess(GraphKit* kit, DecoratorSet decorators, BasicType type,
175 Node* base, C2AccessValuePtr& addr, uint alias_idx) :
176 C2ParseAccess(kit, decorators, type, base, addr),
177 _memory(nullptr),
178 _alias_idx(alias_idx) {}
179
180 // Set the memory node based on the current memory slice.
181 virtual void set_memory();
280 virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
281 Node* new_val, const Type* value_type) const;
282 virtual Node* atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const;
283 virtual Node* atomic_add_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const;
284 void pin_atomic_op(C2AtomicParseAccess& access) const;
285 void clone_in_runtime(PhaseMacroExpand* phase, ArrayCopyNode* ac,
286 address call_addr, const char* call_name) const;
287
288 public:
289 // This is the entry-point for the backend to perform accesses through the Access API.
290 virtual Node* store_at(C2Access& access, C2AccessValue& val) const;
291 virtual Node* load_at(C2Access& access, const Type* val_type) const;
292
293 virtual Node* atomic_cmpxchg_val_at(C2AtomicParseAccess& access, Node* expected_val,
294 Node* new_val, const Type* val_type) const;
295 virtual Node* atomic_cmpxchg_bool_at(C2AtomicParseAccess& access, Node* expected_val,
296 Node* new_val, const Type* val_type) const;
297 virtual Node* atomic_xchg_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const;
298 virtual Node* atomic_add_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const;
299
300 virtual void clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const;
301
302 virtual Node* obj_allocate(PhaseMacroExpand* macro, Node* mem, Node* toobig_false, Node* size_in_bytes,
303 Node*& i_o, Node*& needgc_ctrl,
304 Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
305 intx prefetch_lines) const;
306
307 virtual Node* ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const { return nullptr; }
308
309 // These are general helper methods used by C2
310 enum ArrayCopyPhase {
311 Parsing,
312 Optimization,
313 Expansion
314 };
315
316 virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, bool is_clone_instance, ArrayCopyPhase phase) const { return false; }
317 virtual void clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const;
318
319 // Support for GC barriers emitted during parsing
320 virtual bool has_load_barrier_nodes() const { return false; }
321 virtual bool is_gc_pre_barrier_node(Node* node) const { return false; }
322 virtual bool is_gc_barrier_node(Node* node) const { return false; }
323 virtual Node* step_over_gc_barrier(Node* c) const { return c; }
324
325 // Support for macro expanded GC barriers
326 virtual void register_potential_barrier_node(Node* node) const { }
327 virtual void unregister_potential_barrier_node(Node* node) const { }
328 virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const { }
329 virtual void eliminate_gc_barrier_data(Node* node) const { }
330 virtual void enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const {}
331 virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const {}
332
333 // Allow barrier sets to have shared state that is preserved across a compilation unit.
334 // This could for example comprise macro nodes to be expanded during macro expansion.
335 virtual void* create_barrier_state(Arena* comp_arena) const { return nullptr; }
336 // If the BarrierSetC2 state has barrier nodes in its compilation
337 // unit state to be expanded later, then now is the time to do so.
338 virtual bool expand_barriers(Compile* C, PhaseIterGVN& igvn) const { return false; }
339 virtual bool optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const { return false; }
340 virtual bool strip_mined_loops_expanded(LoopOptsMode mode) const { return false; }
341 virtual bool is_gc_specific_loop_opts_pass(LoopOptsMode mode) const { return false; }
342 // Estimated size of the node barrier in number of C2 Ideal nodes.
343 // This is used to guide heuristics in C2, e.g. whether to unroll a loop.
344 virtual uint estimated_barrier_size(const Node* node) const { return 0; }
345 // Whether the given store can be used to initialize a newly allocated object.
346 virtual bool can_initialize_object(const StoreNode* store) const { return true; }
347
348 enum CompilePhase {
// NOTE(review): paste artifact — what follows is a second variant of the
// preceding region (it adds C2ParseAccess::_ctl/control() and changes two
// signatures); reconcile the two variants before merging.
130 Node* raw_access() const { return _raw_access; }
131
132 uint8_t barrier_data() const { return _barrier_data; }
133 void set_barrier_data(uint8_t data) { _barrier_data = data; }
134
135 void set_raw_access(Node* raw_access) { _raw_access = raw_access; }
136 virtual void set_memory() {} // no-op for normal accesses, but not for atomic accesses.
137
138 MemNode::MemOrd mem_node_mo() const;
139 bool needs_cpu_membar() const;
140
141 virtual PhaseGVN& gvn() const = 0;
142 virtual bool is_parse_access() const { return false; }
143 virtual bool is_opt_access() const { return false; }
144 };
145
146 // C2Access for parse time calls to the BarrierSetC2 backend.
147 class C2ParseAccess: public C2Access {
148 protected:
149 GraphKit* _kit;
150 Node* _ctl;
151
152 void* barrier_set_state() const;
153
154 public:
155 C2ParseAccess(GraphKit* kit, DecoratorSet decorators,
156 BasicType type, Node* base, C2AccessValuePtr& addr,
157 Node* ctl = nullptr) :
158 C2Access(decorators, type, base, addr),
159 _kit(kit),
160 _ctl(ctl) {
161 fixup_decorators();
162 }
163
164 GraphKit* kit() const { return _kit; }
165 Node* control() const;
166
167 virtual PhaseGVN& gvn() const;
168 virtual bool is_parse_access() const { return true; }
169 };
170
171 // This class wraps a bunch of context parameters that are passed around in the
172 // BarrierSetC2 backend hierarchy, for atomic accesses, to reduce boiler plate.
173 class C2AtomicParseAccess: public C2ParseAccess {
174 Node* _memory;
175 uint _alias_idx;
176
177 public:
178 C2AtomicParseAccess(GraphKit* kit, DecoratorSet decorators, BasicType type,
179 Node* base, C2AccessValuePtr& addr, uint alias_idx) :
180 C2ParseAccess(kit, decorators, type, base, addr),
181 _memory(nullptr),
182 _alias_idx(alias_idx) {}
183
184 // Set the memory node based on the current memory slice.
185 virtual void set_memory();
284 virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
285 Node* new_val, const Type* value_type) const;
286 virtual Node* atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const;
287 virtual Node* atomic_add_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const;
288 void pin_atomic_op(C2AtomicParseAccess& access) const;
289 void clone_in_runtime(PhaseMacroExpand* phase, ArrayCopyNode* ac,
290 address call_addr, const char* call_name) const;
291
292 public:
293 // This is the entry-point for the backend to perform accesses through the Access API.
294 virtual Node* store_at(C2Access& access, C2AccessValue& val) const;
295 virtual Node* load_at(C2Access& access, const Type* val_type) const;
296
297 virtual Node* atomic_cmpxchg_val_at(C2AtomicParseAccess& access, Node* expected_val,
298 Node* new_val, const Type* val_type) const;
299 virtual Node* atomic_cmpxchg_bool_at(C2AtomicParseAccess& access, Node* expected_val,
300 Node* new_val, const Type* val_type) const;
301 virtual Node* atomic_xchg_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const;
302 virtual Node* atomic_add_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const;
303
304 virtual void clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* size, bool is_array) const;
305
306 virtual Node* obj_allocate(PhaseMacroExpand* macro, Node* mem, Node* toobig_false, Node* size_in_bytes,
307 Node*& i_o, Node*& needgc_ctrl,
308 Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
309 intx prefetch_lines) const;
310
311 virtual Node* ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const { return nullptr; }
312
313 // These are general helper methods used by C2
314 enum ArrayCopyPhase {
315 Parsing,
316 Optimization,
317 Expansion
318 };
319
320 virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, bool is_clone_instance, ArrayCopyPhase phase) const { return false; }
321 virtual void clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const;
322
323 // Support for GC barriers emitted during parsing
324 virtual bool has_load_barrier_nodes() const { return false; }
325 virtual bool is_gc_pre_barrier_node(Node* node) const { return false; }
326 virtual bool is_gc_barrier_node(Node* node) const { return false; }
327 virtual Node* step_over_gc_barrier(Node* c) const { return c; }
328
329 // Support for macro expanded GC barriers
330 virtual void register_potential_barrier_node(Node* node) const { }
331 virtual void unregister_potential_barrier_node(Node* node) const { }
332 virtual void eliminate_gc_barrier(PhaseIterGVN* igvn, Node* node) const { }
333 virtual void eliminate_gc_barrier_data(Node* node) const { }
334 virtual void enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const {}
335 virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const {}
336
337 // Allow barrier sets to have shared state that is preserved across a compilation unit.
338 // This could for example comprise macro nodes to be expanded during macro expansion.
339 virtual void* create_barrier_state(Arena* comp_arena) const { return nullptr; }
340 // If the BarrierSetC2 state has barrier nodes in its compilation
341 // unit state to be expanded later, then now is the time to do so.
342 virtual bool expand_barriers(Compile* C, PhaseIterGVN& igvn) const { return false; }
343 virtual bool optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const { return false; }
344 virtual bool strip_mined_loops_expanded(LoopOptsMode mode) const { return false; }
345 virtual bool is_gc_specific_loop_opts_pass(LoopOptsMode mode) const { return false; }
346 // Estimated size of the node barrier in number of C2 Ideal nodes.
347 // This is used to guide heuristics in C2, e.g. whether to unroll a loop.
348 virtual uint estimated_barrier_size(const Node* node) const { return 0; }
349 // Whether the given store can be used to initialize a newly allocated object.
350 virtual bool can_initialize_object(const StoreNode* store) const { return true; }
351
352 enum CompilePhase {
// NOTE(review): paste artifact — end of the second variant of this region.