< prev index next >

src/hotspot/share/gc/shared/c2/barrierSetC2.hpp

Print this page

129   Node* raw_access() const        { return _raw_access; }  // memory node installed via set_raw_access()
130 
131   uint8_t barrier_data() const        { return _barrier_data; }
132   void set_barrier_data(uint8_t data) { _barrier_data = data; }  // 8-bit barrier tag attached to this access
133 
134   void set_raw_access(Node* raw_access) { _raw_access = raw_access; }
135   virtual void set_memory() {} // no-op for normal accesses, but not for atomic accesses.
136 
137   MemNode::MemOrd mem_node_mo() const;   // memory ordering to use for the emitted memory node
138   bool needs_cpu_membar() const;
139 
140   virtual PhaseGVN& gvn() const = 0;     // pure virtual; supplied by subclasses (see C2ParseAccess)
141   virtual bool is_parse_access() const { return false; }  // overridden to true by C2ParseAccess
142   virtual bool is_opt_access() const { return false; }
143 };
144 
145 // C2Access for parse time calls to the BarrierSetC2 backend.
146 class C2ParseAccess: public C2Access {
147 protected:
148   GraphKit*         _kit;  // enclosing GraphKit for this parse-time access

149 
150   void* barrier_set_state() const;
151 
152 public:
153   C2ParseAccess(GraphKit* kit, DecoratorSet decorators,
154                 BasicType type, Node* base, C2AccessValuePtr& addr) :

155     C2Access(decorators, type, base, addr),
156     _kit(kit) {

157     fixup_decorators();
158   }
159 
160   GraphKit* kit() const           { return _kit; }

161 
162   virtual PhaseGVN& gvn() const;  // overrides the pure virtual C2Access::gvn(); defined out of line
163   virtual bool is_parse_access() const { return true; }
164 };
165 
166 // This class wraps a bunch of context parameters that are passed around in the
167 // BarrierSetC2 backend hierarchy, for atomic accesses, to reduce boilerplate.
168 class C2AtomicParseAccess: public C2ParseAccess {
169   Node* _memory;           // memory state of the access; starts NULL (see ctor), set later
170   uint  _alias_idx;        // C2 alias index for this access
171   bool  _needs_pinning;    // defaults to true
172 
173 public:
174   C2AtomicParseAccess(GraphKit* kit, DecoratorSet decorators, BasicType type,
175                  Node* base, C2AccessValuePtr& addr, uint alias_idx) :
176     C2ParseAccess(kit, decorators, type, base, addr),
177     _memory(NULL),
178     _alias_idx(alias_idx),
179     _needs_pinning(true) {}
180 

221   virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
222                                                Node* new_val, const Type* val_type) const;
223   virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
224                                                 Node* new_val, const Type* value_type) const;
225   virtual Node* atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const;
226   virtual Node* atomic_add_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const;
227   void pin_atomic_op(C2AtomicParseAccess& access) const;
228 
229 public:
230   // This is the entry-point for the backend to perform accesses through the Access API.
231   virtual Node* store_at(C2Access& access, C2AccessValue& val) const;
232   virtual Node* load_at(C2Access& access, const Type* val_type) const;
233 
234   virtual Node* atomic_cmpxchg_val_at(C2AtomicParseAccess& access, Node* expected_val,
235                                       Node* new_val, const Type* val_type) const;
236   virtual Node* atomic_cmpxchg_bool_at(C2AtomicParseAccess& access, Node* expected_val,
237                                        Node* new_val, const Type* val_type) const;
238   virtual Node* atomic_xchg_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const;
239   virtual Node* atomic_add_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const;
240 
241   virtual void clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const;
242 
243   virtual Node* obj_allocate(PhaseMacroExpand* macro, Node* mem, Node* toobig_false, Node* size_in_bytes,
244                              Node*& i_o, Node*& needgc_ctrl,
245                              Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
246                              intx prefetch_lines) const;
247 
248   virtual Node* ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const { return NULL; }  // NULL: no GC-specific idealization
249 
250   // These are general helper methods used by C2
251   enum ArrayCopyPhase {
252     Parsing,
253     Optimization,
254     Expansion
255   };
256 
257   virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, bool is_clone_instance, ArrayCopyPhase phase) const { return false; }
258   virtual void clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const;
259 
260   // Support for GC barriers emitted during parsing
261   virtual bool has_load_barrier_nodes() const { return false; }
262   virtual bool is_gc_barrier_node(Node* node) const { return false; }
263   virtual Node* step_over_gc_barrier(Node* c) const { return c; }  // default: nothing to step over
264 
265   // Support for macro expanded GC barriers
266   virtual void register_potential_barrier_node(Node* node) const { }
267   virtual void unregister_potential_barrier_node(Node* node) const { }
268   virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const { }
269   virtual void enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const {}
270   virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const {}
271 
272   // Allow barrier sets to have shared state that is preserved across a compilation unit.
273   // This could for example comprise macro nodes to be expanded during macro expansion.
274   virtual void* create_barrier_state(Arena* comp_arena) const { return NULL; }
275   // If the BarrierSetC2 state has barrier nodes in its compilation
276   // unit state to be expanded later, then now is the time to do so.
277   virtual bool expand_barriers(Compile* C, PhaseIterGVN& igvn) const { return false; }
278   virtual bool optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const { return false; }
279   virtual bool strip_mined_loops_expanded(LoopOptsMode mode) const { return false; }
280   virtual bool is_gc_specific_loop_opts_pass(LoopOptsMode mode) const { return false; }
281 
282   enum CompilePhase {
283     BeforeOptimize,
284     BeforeMacroExpand,
285     BeforeCodeGen
286   };
287 
288 #ifdef ASSERT

129   Node* raw_access() const        { return _raw_access; }  // memory node installed via set_raw_access()
130 
131   uint8_t barrier_data() const        { return _barrier_data; }
132   void set_barrier_data(uint8_t data) { _barrier_data = data; }  // 8-bit barrier tag attached to this access
133 
134   void set_raw_access(Node* raw_access) { _raw_access = raw_access; }
135   virtual void set_memory() {} // no-op for normal accesses, but not for atomic accesses.
136 
137   MemNode::MemOrd mem_node_mo() const;   // memory ordering to use for the emitted memory node
138   bool needs_cpu_membar() const;
139 
140   virtual PhaseGVN& gvn() const = 0;     // pure virtual; supplied by subclasses (see C2ParseAccess)
141   virtual bool is_parse_access() const { return false; }  // overridden to true by C2ParseAccess
142   virtual bool is_opt_access() const { return false; }
143 };
144 
145 // C2Access for parse time calls to the BarrierSetC2 backend.
146 class C2ParseAccess: public C2Access {
147 protected:
148   GraphKit*         _kit;  // enclosing GraphKit for this parse-time access
149   Node* _ctl;              // optional explicit control input; NULL by default (see ctor)
150 
151   void* barrier_set_state() const;
152 
153 public:
154   C2ParseAccess(GraphKit* kit, DecoratorSet decorators,
155                 BasicType type, Node* base, C2AccessValuePtr& addr,
156                 Node* ctl = NULL) :
157     C2Access(decorators, type, base, addr),
158     _kit(kit),
159     _ctl(ctl) {
160     fixup_decorators();
161   }
162 
163   GraphKit* kit() const           { return _kit; }
164   Node* control() const;  // NOTE(review): defined out of line; presumably falls back to the kit's control when _ctl is NULL — confirm in the .cpp
165 
166   virtual PhaseGVN& gvn() const;  // overrides the pure virtual C2Access::gvn(); defined out of line
167   virtual bool is_parse_access() const { return true; }
168 };
169 
170 // This class wraps a bunch of context parameters that are passed around in the
171 // BarrierSetC2 backend hierarchy, for atomic accesses, to reduce boilerplate.
172 class C2AtomicParseAccess: public C2ParseAccess {
173   Node* _memory;           // memory state of the access; starts NULL (see ctor), set later
174   uint  _alias_idx;        // C2 alias index for this access
175   bool  _needs_pinning;    // defaults to true
176 
177 public:
178   C2AtomicParseAccess(GraphKit* kit, DecoratorSet decorators, BasicType type,
179                  Node* base, C2AccessValuePtr& addr, uint alias_idx) :
180     C2ParseAccess(kit, decorators, type, base, addr),
181     _memory(NULL),
182     _alias_idx(alias_idx),
183     _needs_pinning(true) {}
184 

225   virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
226                                                Node* new_val, const Type* val_type) const;
227   virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
228                                                 Node* new_val, const Type* value_type) const;
229   virtual Node* atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const;
230   virtual Node* atomic_add_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const;
231   void pin_atomic_op(C2AtomicParseAccess& access) const;
232 
233 public:
234   // This is the entry-point for the backend to perform accesses through the Access API.
235   virtual Node* store_at(C2Access& access, C2AccessValue& val) const;
236   virtual Node* load_at(C2Access& access, const Type* val_type) const;
237 
238   virtual Node* atomic_cmpxchg_val_at(C2AtomicParseAccess& access, Node* expected_val,
239                                       Node* new_val, const Type* val_type) const;
240   virtual Node* atomic_cmpxchg_bool_at(C2AtomicParseAccess& access, Node* expected_val,
241                                        Node* new_val, const Type* val_type) const;
242   virtual Node* atomic_xchg_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const;
243   virtual Node* atomic_add_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const;
244 
245   virtual void clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* size, bool is_array) const;
246 
247   virtual Node* obj_allocate(PhaseMacroExpand* macro, Node* mem, Node* toobig_false, Node* size_in_bytes,
248                              Node*& i_o, Node*& needgc_ctrl,
249                              Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
250                              intx prefetch_lines) const;
251 
252   virtual Node* ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const { return NULL; }  // NULL: no GC-specific idealization
253 
254   // These are general helper methods used by C2
255   enum ArrayCopyPhase {
256     Parsing,
257     Optimization,
258     Expansion
259   };
260 
261   virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, bool is_clone_instance, ArrayCopyPhase phase) const { return false; }
262   virtual void clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const;
263 
264   // Support for GC barriers emitted during parsing
265   virtual bool has_load_barrier_nodes() const { return false; }
266   virtual bool is_gc_barrier_node(Node* node) const { return false; }
267   virtual Node* step_over_gc_barrier(Node* c) const { return c; }  // default: nothing to step over
268 
269   // Support for macro expanded GC barriers
270   virtual void register_potential_barrier_node(Node* node) const { }
271   virtual void unregister_potential_barrier_node(Node* node) const { }
272   virtual void eliminate_gc_barrier(PhaseIterGVN* igvn, Node* node) const { }  // NOTE(review): now takes PhaseIterGVN* (old version took PhaseMacroExpand*)
273   virtual void enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const {}
274   virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const {}
275 
276   // Allow barrier sets to have shared state that is preserved across a compilation unit.
277   // This could for example comprise macro nodes to be expanded during macro expansion.
278   virtual void* create_barrier_state(Arena* comp_arena) const { return NULL; }
279   // If the BarrierSetC2 state has barrier nodes in its compilation
280   // unit state to be expanded later, then now is the time to do so.
281   virtual bool expand_barriers(Compile* C, PhaseIterGVN& igvn) const { return false; }
282   virtual bool optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const { return false; }
283   virtual bool strip_mined_loops_expanded(LoopOptsMode mode) const { return false; }
284   virtual bool is_gc_specific_loop_opts_pass(LoopOptsMode mode) const { return false; }
285 
286   enum CompilePhase {
287     BeforeOptimize,
288     BeforeMacroExpand,
289     BeforeCodeGen
290   };
291 
292 #ifdef ASSERT
< prev index next >