130 Node* raw_access() const { return _raw_access; }               // Raw memory node generated for this access (if any).
131
132 uint8_t barrier_data() const { return _barrier_data; }          // GC-specific barrier bits carried along with the access.
133 void set_barrier_data(uint8_t data) { _barrier_data = data; }   // Replaces all barrier bits at once.
134
135 void set_raw_access(Node* raw_access) { _raw_access = raw_access; }
136 virtual void set_memory() {} // no-op for normal accesses, but not for atomic accesses.
137
138 MemNode::MemOrd mem_node_mo() const;   // Memory ordering for the generated MemNode — presumably derived from the decorators; confirm in the .cpp.
139 bool needs_cpu_membar() const;         // Whether a CPU membar must accompany this access — exact conditions live in the .cpp.
140
141 virtual PhaseGVN& gvn() const = 0;     // Subclasses supply the GVN phase used when generating nodes.
142 virtual bool is_parse_access() const { return false; }   // Overridden (true) by C2ParseAccess.
143 virtual bool is_opt_access() const { return false; }     // Overridden (true) by the optimization-time access subclass.
144 };
145
146 // C2Access for parse time calls to the BarrierSetC2 backend.
147 class C2ParseAccess: public C2Access {
148 protected:
149 GraphKit* _kit;   // Parse-time graph construction context the access is emitted into.
150
151 void* barrier_set_state() const;   // Opaque per-compilation barrier-set state — presumably the BarrierSetC2 state; confirm in the .cpp.
152
153 public:
154 C2ParseAccess(GraphKit* kit, DecoratorSet decorators,
155 BasicType type, Node* base, C2AccessValuePtr& addr) :
156 C2Access(decorators, type, base, addr),
157 _kit(kit) {
158 fixup_decorators();   // Normalizes/completes the decorator set once base/addr are known (inherited helper).
159 }
160
161 GraphKit* kit() const { return _kit; }
162
163 virtual PhaseGVN& gvn() const;   // Defined in the .cpp — presumably returns the kit's GVN.
164 virtual bool is_parse_access() const { return true; }
165 };
166
167 // This class wraps a bunch of context parameters that are passed around in the
168 // BarrierSetC2 backend hierarchy, for atomic accesses, to reduce boiler plate.
169 class C2AtomicParseAccess: public C2ParseAccess {
170 Node* _memory;     // Memory state seen by the atomic op; starts null and is filled in by set_memory().
171 uint _alias_idx;   // Alias index of the accessed memory slice.
172
173 public:
174 C2AtomicParseAccess(GraphKit* kit, DecoratorSet decorators, BasicType type,
175 Node* base, C2AccessValuePtr& addr, uint alias_idx) :
176 C2ParseAccess(kit, decorators, type, base, addr),
177 _memory(nullptr),
178 _alias_idx(alias_idx) {}
179
180 // Set the memory node based on the current memory slice.
181 virtual void set_memory();
219 virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,   // CAS returning the previous value; GC backends override these *_at_resolved hooks to wrap barriers around the raw op.
220 Node* new_val, const Type* val_type) const;
221 virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,  // CAS returning success/failure as a boolean.
222 Node* new_val, const Type* value_type) const;
223 virtual Node* atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const;   // Unconditional atomic swap.
224 virtual Node* atomic_add_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const;    // Atomic add.
225 void pin_atomic_op(C2AtomicParseAccess& access) const;   // Non-virtual helper — presumably pins the op's control to prevent hoisting; confirm in the .cpp.
226
227 public:
228 // This is the entry-point for the backend to perform accesses through the Access API.
229 virtual Node* store_at(C2Access& access, C2AccessValue& val) const;    // Store val at the access address.
230 virtual Node* load_at(C2Access& access, const Type* val_type) const;   // Load a value of val_type from the access address.
231
232 virtual Node* atomic_cmpxchg_val_at(C2AtomicParseAccess& access, Node* expected_val,    // Public atomic entry points — presumably resolve the access then delegate to the *_at_resolved hooks above; confirm in the .cpp.
233 Node* new_val, const Type* val_type) const;
234 virtual Node* atomic_cmpxchg_bool_at(C2AtomicParseAccess& access, Node* expected_val,
235 Node* new_val, const Type* val_type) const;
236 virtual Node* atomic_xchg_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const;
237 virtual Node* atomic_add_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const;
238
239 virtual void clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const;   // GC-aware bulk copy of size bytes from src to dst — presumably backs clone intrinsics; confirm against callers.
240
241 virtual Node* obj_allocate(PhaseMacroExpand* macro, Node* mem, Node* toobig_false, Node* size_in_bytes,   // Expands an object allocation during macro expansion; the Node*& out-params receive the updated i/o, need-GC and fast-path control/memory edges.
242 Node*& i_o, Node*& needgc_ctrl,
243 Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
244 intx prefetch_lines) const;
245
246 virtual Node* ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const { return nullptr; }   // Idealization hook; default returns nullptr (no GC-specific transform offered).
247
248 // These are general helper methods used by C2
249 enum ArrayCopyPhase {   // Compilation phase at which an arraycopy is being queried/processed.
250 Parsing,
251 Optimization,
252 Expansion
253 };
254
255 virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, bool is_clone_instance, ArrayCopyPhase phase) const { return false; }   // Default: no barriers needed for arraycopy.
256 virtual void clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const;
257
258 // Support for GC barriers emitted during parsing
259 virtual bool has_load_barrier_nodes() const { return false; }             // Default: this collector emits no load-barrier nodes.
260 virtual bool is_gc_pre_barrier_node(Node* node) const { return false; }
261 virtual bool is_gc_barrier_node(Node* node) const { return false; }
262 virtual Node* step_over_gc_barrier(Node* c) const { return c; }           // Identity by default: nothing to step over.
263
264 // Support for macro expanded GC barriers
265 virtual void register_potential_barrier_node(Node* node) const { }        // No-op defaults; collectors with expanded barriers override these to track and eliminate their nodes.
266 virtual void unregister_potential_barrier_node(Node* node) const { }
267 virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const { }
268 virtual void eliminate_gc_barrier_data(Node* node) const { }
269 virtual void enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const {}
270 virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const {}
271
272 // Allow barrier sets to have shared state that is preserved across a compilation unit.
273 // This could for example comprise macro nodes to be expanded during macro expansion.
274 virtual void* create_barrier_state(Arena* comp_arena) const { return nullptr; }
275 // If the BarrierSetC2 state has barrier nodes in its compilation
276 // unit state to be expanded later, then now is the time to do so.
277 virtual bool expand_barriers(Compile* C, PhaseIterGVN& igvn) const { return false; }
278 virtual bool optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const { return false; }   // GC-specific loop-opts hooks; the false defaults indicate the collector did nothing — NOTE(review): confirm exact boolean contract in PhaseIdealLoop callers.
279 virtual bool strip_mined_loops_expanded(LoopOptsMode mode) const { return false; }
280 virtual bool is_gc_specific_loop_opts_pass(LoopOptsMode mode) const { return false; }
281
282 enum CompilePhase {   // Points in the C2 pipeline at which barrier-set hooks may be invoked.
283 BeforeOptimize,
284 BeforeMacroExpand,
285 BeforeCodeGen
286 };
287
|
130 Node* raw_access() const { return _raw_access; }               // Raw memory node generated for this access (if any).
131
132 uint8_t barrier_data() const { return _barrier_data; }          // GC-specific barrier bits carried along with the access.
133 void set_barrier_data(uint8_t data) { _barrier_data = data; }   // Replaces all barrier bits at once.
134
135 void set_raw_access(Node* raw_access) { _raw_access = raw_access; }
136 virtual void set_memory() {} // no-op for normal accesses, but not for atomic accesses.
137
138 MemNode::MemOrd mem_node_mo() const;   // Memory ordering for the generated MemNode — presumably derived from the decorators; confirm in the .cpp.
139 bool needs_cpu_membar() const;         // Whether a CPU membar must accompany this access — exact conditions live in the .cpp.
140
141 virtual PhaseGVN& gvn() const = 0;     // Subclasses supply the GVN phase used when generating nodes.
142 virtual bool is_parse_access() const { return false; }   // Overridden (true) by C2ParseAccess.
143 virtual bool is_opt_access() const { return false; }     // Overridden (true) by the optimization-time access subclass.
144 };
145
146 // C2Access for parse time calls to the BarrierSetC2 backend.
147 class C2ParseAccess: public C2Access {
148 protected:
149 GraphKit* _kit;   // Parse-time graph construction context the access is emitted into.
150 Node* _ctl;       // Optional explicit control input; nullptr presumably means "use the kit's current control" — confirm in control()'s definition.
151
152 void* barrier_set_state() const;   // Opaque per-compilation barrier-set state — presumably the BarrierSetC2 state; confirm in the .cpp.
153
154 public:
155 C2ParseAccess(GraphKit* kit, DecoratorSet decorators,
156 BasicType type, Node* base, C2AccessValuePtr& addr,
157 Node* ctl = nullptr) :
158 C2Access(decorators, type, base, addr),
159 _kit(kit),
160 _ctl(ctl) {
161 fixup_decorators();   // Normalizes/completes the decorator set once base/addr are known (inherited helper).
162 }
163
164 GraphKit* kit() const { return _kit; }
165 Node* control() const;   // Resolves the control node to use (explicit _ctl or a fallback) — defined in the .cpp.
166
167 virtual PhaseGVN& gvn() const;   // Defined in the .cpp — presumably returns the kit's GVN.
168 virtual bool is_parse_access() const { return true; }
169 };
170
171 // This class wraps a bunch of context parameters that are passed around in the
172 // BarrierSetC2 backend hierarchy, for atomic accesses, to reduce boiler plate.
173 class C2AtomicParseAccess: public C2ParseAccess {
174 Node* _memory;     // Memory state seen by the atomic op; starts null and is filled in by set_memory().
175 uint _alias_idx;   // Alias index of the accessed memory slice.
176
177 public:
178 C2AtomicParseAccess(GraphKit* kit, DecoratorSet decorators, BasicType type,
179 Node* base, C2AccessValuePtr& addr, uint alias_idx) :
180 C2ParseAccess(kit, decorators, type, base, addr),
181 _memory(nullptr),
182 _alias_idx(alias_idx) {}
183
184 // Set the memory node based on the current memory slice.
185 virtual void set_memory();
223 virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,   // CAS returning the previous value; GC backends override these *_at_resolved hooks to wrap barriers around the raw op.
224 Node* new_val, const Type* val_type) const;
225 virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,  // CAS returning success/failure as a boolean.
226 Node* new_val, const Type* value_type) const;
227 virtual Node* atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const;   // Unconditional atomic swap.
228 virtual Node* atomic_add_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const;    // Atomic add.
229 void pin_atomic_op(C2AtomicParseAccess& access) const;   // Non-virtual helper — presumably pins the op's control to prevent hoisting; confirm in the .cpp.
230
231 public:
232 // This is the entry-point for the backend to perform accesses through the Access API.
233 virtual Node* store_at(C2Access& access, C2AccessValue& val) const;    // Store val at the access address.
234 virtual Node* load_at(C2Access& access, const Type* val_type) const;   // Load a value of val_type from the access address.
235
236 virtual Node* atomic_cmpxchg_val_at(C2AtomicParseAccess& access, Node* expected_val,    // Public atomic entry points — presumably resolve the access then delegate to the *_at_resolved hooks above; confirm in the .cpp.
237 Node* new_val, const Type* val_type) const;
238 virtual Node* atomic_cmpxchg_bool_at(C2AtomicParseAccess& access, Node* expected_val,
239 Node* new_val, const Type* val_type) const;
240 virtual Node* atomic_xchg_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const;
241 virtual Node* atomic_add_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const;
242
243 virtual void clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* size, bool is_array) const;   // GC-aware bulk copy of size bytes from src_base to dst_base — presumably backs clone intrinsics; confirm against callers.
244
245 virtual Node* obj_allocate(PhaseMacroExpand* macro, Node* mem, Node* toobig_false, Node* size_in_bytes,   // Expands an object allocation during macro expansion; the Node*& out-params receive the updated i/o, need-GC and fast-path control/memory edges.
246 Node*& i_o, Node*& needgc_ctrl,
247 Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
248 intx prefetch_lines) const;
249
250 virtual Node* ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const { return nullptr; }   // Idealization hook; default returns nullptr (no GC-specific transform offered).
251
252 // These are general helper methods used by C2
253 enum ArrayCopyPhase {   // Compilation phase at which an arraycopy is being queried/processed.
254 Parsing,
255 Optimization,
256 Expansion
257 };
258
259 virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, bool is_clone_instance, ArrayCopyPhase phase) const { return false; }   // Default: no barriers needed for arraycopy.
260 virtual void clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const;
261
262 // Support for GC barriers emitted during parsing
263 virtual bool has_load_barrier_nodes() const { return false; }             // Default: this collector emits no load-barrier nodes.
264 virtual bool is_gc_pre_barrier_node(Node* node) const { return false; }
265 virtual bool is_gc_barrier_node(Node* node) const { return false; }
266 virtual Node* step_over_gc_barrier(Node* c) const { return c; }           // Identity by default: nothing to step over.
267
268 // Support for macro expanded GC barriers
269 virtual void register_potential_barrier_node(Node* node) const { }        // No-op defaults; collectors with expanded barriers override these to track and eliminate their nodes.
270 virtual void unregister_potential_barrier_node(Node* node) const { }
271 virtual void eliminate_gc_barrier(PhaseIterGVN* igvn, Node* node) const { }
272 virtual void eliminate_gc_barrier_data(Node* node) const { }
273 virtual void enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const {}
274 virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const {}
275
276 // Allow barrier sets to have shared state that is preserved across a compilation unit.
277 // This could for example comprise macro nodes to be expanded during macro expansion.
278 virtual void* create_barrier_state(Arena* comp_arena) const { return nullptr; }
279 // If the BarrierSetC2 state has barrier nodes in its compilation
280 // unit state to be expanded later, then now is the time to do so.
281 virtual bool expand_barriers(Compile* C, PhaseIterGVN& igvn) const { return false; }
282 virtual bool optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const { return false; }   // GC-specific loop-opts hooks; the false defaults indicate the collector did nothing — NOTE(review): confirm exact boolean contract in PhaseIdealLoop callers.
283 virtual bool strip_mined_loops_expanded(LoopOptsMode mode) const { return false; }
284 virtual bool is_gc_specific_loop_opts_pass(LoopOptsMode mode) const { return false; }
285
286 enum CompilePhase {   // Points in the C2 pipeline at which barrier-set hooks may be invoked.
287 BeforeOptimize,
288 BeforeMacroExpand,
289 BeforeCodeGen
290 };
291
|