< prev index next > src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp
Print this page
#ifndef SHARE_GC_SHENANDOAH_C2_SHENANDOAHBARRIERSETC2_HPP
#define SHARE_GC_SHENANDOAH_C2_SHENANDOAHBARRIERSETC2_HPP
#include "gc/shared/c2/barrierSetC2.hpp"
! #include "gc/shenandoah/c2/shenandoahSupport.hpp"
#include "utilities/growableArray.hpp"
// Per-compilation C2 state for Shenandoah (old scheme): a registry of the
// ShenandoahLoadReferenceBarrierNode instances created while building the
// ideal graph, with indexed access plus add/remove for later expansion or
// elimination. Arena-allocated (ArenaObj) — lifetime is one compilation.
! class ShenandoahBarrierSetC2State : public ArenaObj {
! private:
// Growable list of all registered LRB nodes; allocated in the compile arena.
! GrowableArray<ShenandoahLoadReferenceBarrierNode*>* _load_reference_barriers;
public:
! ShenandoahBarrierSetC2State(Arena* comp_arena);
! int load_reference_barriers_count() const;
! ShenandoahLoadReferenceBarrierNode* load_reference_barrier(int idx) const;
! void add_load_reference_barrier(ShenandoahLoadReferenceBarrierNode* n);
! void remove_load_reference_barrier(ShenandoahLoadReferenceBarrierNode * n);
};
class ShenandoahBarrierSetC2 : public BarrierSetC2 {
! private:
! void shenandoah_eliminate_wb_pre(Node* call, PhaseIterGVN* igvn) const;
!
! bool satb_can_remove_pre_barrier(GraphKit* kit, PhaseValues* phase, Node* adr,
! BasicType bt, uint adr_idx) const;
! void satb_write_barrier_pre(GraphKit* kit, bool do_load,
! Node* obj,
- Node* adr,
- uint alias_idx,
- Node* val,
- const TypeOopPtr* val_type,
- Node* pre_val,
- BasicType bt) const;
-
- void shenandoah_write_barrier_pre(GraphKit* kit,
- bool do_load,
- Node* obj,
- Node* adr,
- uint alias_idx,
- Node* val,
- const TypeOopPtr* val_type,
- Node* pre_val,
- BasicType bt) const;
-
- void post_barrier(GraphKit* kit,
- Node* ctl,
- Node* store,
- Node* obj,
- Node* adr,
- uint adr_idx,
- Node* val,
- BasicType bt,
- bool use_precise) const;
-
- void insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
- Node* pre_val, bool need_mem_bar) const;
-
- static bool clone_needs_barrier(Node* src, PhaseGVN& gvn);
protected:
virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;
virtual Node* store_at_resolved(C2Access& access, C2AccessValue& val) const;
virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
#ifndef SHARE_GC_SHENANDOAH_C2_SHENANDOAHBARRIERSETC2_HPP
#define SHARE_GC_SHENANDOAH_C2_SHENANDOAHBARRIERSETC2_HPP
#include "gc/shared/c2/barrierSetC2.hpp"
! #include "gc/shared/gc_globals.hpp"
+ #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
+ #include "gc/shenandoah/shenandoahRuntime.hpp"
+ #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "utilities/growableArray.hpp"
// Encoding of the per-access barrier-data byte (queried below via
// node->barrier_data()): the low bits select which Shenandoah barriers the
// access needs, the high bits carry extra metadata about the access.
// NOTE(review): exactly one of Strong/Weak/Phantom is presumably set per
// access — confirm against the code that assigns barrier data.
! static const uint8_t ShenandoahBitStrong = 1 << 0; // Barrier: LRB, strong
! static const uint8_t ShenandoahBitWeak = 1 << 1; // Barrier: LRB, weak
! static const uint8_t ShenandoahBitPhantom = 1 << 2; // Barrier: LRB, phantom
+ static const uint8_t ShenandoahBitKeepAlive = 1 << 3; // Barrier: KeepAlive (SATB for stores, KA for loads)
+ static const uint8_t ShenandoahBitCardMark = 1 << 4; // Barrier: CM
+ static const uint8_t ShenandoahBitNotNull = 1 << 5; // Metadata: src/dst is not null
+ static const uint8_t ShenandoahBitNative = 1 << 6; // Metadata: access is in native, not in heap
+ static const uint8_t ShenandoahBitElided = 1 << 7; // Metadata: barrier is elided
+
+ // Barrier data that implies real barriers, not additional metadata.
+ static const uint8_t ShenandoahBitsReal = ShenandoahBitStrong | ShenandoahBitWeak | ShenandoahBitPhantom |
+ ShenandoahBitKeepAlive |
+ ShenandoahBitCardMark;
+
// Forward declaration; full definition follows the barrier-set class below.
+ class ShenandoahBarrierStubC2;
+
// Per-compilation C2 state for Shenandoah (new late-barrier scheme): owns the
// list of out-of-line barrier stubs generated during code emission, and
// records the code-buffer offset at which the stub section starts.
+ class ShenandoahBarrierSetC2State : public BarrierSetC2State {
// All barrier stubs registered for this compilation.
+ GrowableArray<ShenandoahBarrierStubC2*>* _stubs;
// Offset into the emitted code where the stub code begins.
+ int _stubs_start_offset;
public:
! explicit ShenandoahBarrierSetC2State(Arena* comp_arena);
+
// Overrides of BarrierSetC2State liveness queries; presumably these control
// which MachNodes get register-liveness data for stub emission — confirm
// against BarrierSetC2State.
+ bool needs_liveness_data(const MachNode* mach) const override;
+ bool needs_livein_data() const override;
+
+ GrowableArray<ShenandoahBarrierStubC2*>* stubs() {
+ return _stubs;
+ }
! void set_stubs_start_offset(int offset) {
! _stubs_start_offset = offset;
! }
!
+ int stubs_start_offset() {
+ return _stubs_start_offset;
+ }
};
// Shenandoah's BarrierSetC2 implementation: the hooks through which C2
// parsing, loop optimization, macro expansion, escape analysis and matching
// interact with Shenandoah's GC barriers. Shown here with webrev change
// markers: '!' changed, '+' added, '-' removed relative to the old scheme.
class ShenandoahBarrierSetC2 : public BarrierSetC2 {
!
// Decide whether an Object.clone()-style copy of src_type needs a barrier;
// also reports via is_oop_array whether the source is an oop array.
! static bool clone_needs_barrier(const TypeOopPtr* src_type, bool& is_oop_array);
!
! static bool can_remove_load_barrier(Node* node);
!
// Refinement of barrier data on load/store nodes — presumably narrows the
// barrier bits once more type information is known; confirm in the .cpp.
! static void refine_load(Node* node);
! static void refine_store(const Node* node);
protected:
// Access-API resolution points: these attach Shenandoah barrier data/nodes
// to loads, stores and atomics as they are parsed.
virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;
virtual Node* store_at_resolved(C2Access& access, C2AccessValue& val) const;
virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
virtual Node* atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const;
public:
// Singleton-style accessor for the active ShenandoahBarrierSetC2 instance.
static ShenandoahBarrierSetC2* bsc2();
- static bool is_shenandoah_wb_pre_call(Node* call);
- static bool is_shenandoah_clone_call(Node* call);
- static bool is_shenandoah_lrb_call(Node* call);
- static bool is_shenandoah_marking_if(PhaseValues* phase, Node* n);
- static bool is_shenandoah_state_load(Node* n);
- static bool has_only_shenandoah_wb_pre_uses(Node* n);
-
// Typed accessor for the per-compilation state created by create_barrier_state().
ShenandoahBarrierSetC2State* state() const;
- static const TypeFunc* write_barrier_pre_Type();
- static const TypeFunc* clone_barrier_Type();
- static const TypeFunc* load_reference_barrier_Type();
- virtual bool has_load_barrier_nodes() const { return true; }
-
// This is the entry-point for the backend to perform accesses through the Access API.
virtual void clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const;
// These are general helper methods used by C2
virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, bool is_clone_instance, ArrayCopyPhase phase) const;
// Support for GC barriers emitted during parsing
- virtual bool is_gc_pre_barrier_node(Node* node) const;
- virtual bool is_gc_barrier_node(Node* node) const;
- virtual Node* step_over_gc_barrier(Node* c) const;
virtual bool expand_barriers(Compile* C, PhaseIterGVN& igvn) const;
! virtual bool optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const;
- virtual bool strip_mined_loops_expanded(LoopOptsMode mode) const { return mode == LoopOptsShenandoahExpand; }
- virtual bool is_gc_specific_loop_opts_pass(LoopOptsMode mode) const { return mode == LoopOptsShenandoahExpand; }
// Support for macro expanded GC barriers
- virtual void register_potential_barrier_node(Node* node) const;
- virtual void unregister_potential_barrier_node(Node* node) const;
virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const;
! virtual void enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const;
- virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const;
// Allow barrier sets to have shared state that is preserved across a compilation unit.
// This could for example comprise macro nodes to be expanded during macro expansion.
virtual void* create_barrier_state(Arena* comp_arena) const;
- // If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be
- // expanded later, then now is the time to do so.
- virtual bool expand_macro_nodes(PhaseMacroExpand* macro) const;
#ifdef ASSERT
virtual void verify_gc_barriers(Compile* compile, CompilePhase phase) const;
#endif
// GVN / final-reshaping / escape-analysis / matcher hooks for barrier nodes.
! virtual Node* ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const;
! virtual bool final_graph_reshaping(Compile* compile, Node* n, uint opcode, Unique_Node_List& dead_nodes) const;
! virtual bool escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const;
! virtual bool escape_add_final_edges(ConnectionGraph* conn_graph, PhaseGVN* gvn, Node* n, uint opcode) const;
! virtual bool escape_has_out_with_unsafe_object(Node* n) const;
! virtual bool matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const;
! virtual bool matcher_is_store_load_barrier(Node* x, uint xop) const;
};
#endif // SHARE_GC_SHENANDOAH_C2_SHENANDOAHBARRIERSETC2_HPP
virtual Node* atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const;
public:
static ShenandoahBarrierSetC2* bsc2();
ShenandoahBarrierSetC2State* state() const;
// This is the entry-point for the backend to perform accesses through the Access API.
+ virtual void clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* size, bool is_array) const;
virtual void clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const;
// These are general helper methods used by C2
virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, bool is_clone_instance, ArrayCopyPhase phase) const;
// Support for GC barriers emitted during parsing
virtual bool expand_barriers(Compile* C, PhaseIterGVN& igvn) const;
! virtual void final_refinement(Compile* C) const;
// Support for macro expanded GC barriers
virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const;
! virtual void eliminate_gc_barrier_data(Node* node) const;
// Allow barrier sets to have shared state that is preserved across a compilation unit.
// This could for example comprise macro nodes to be expanded during macro expansion.
virtual void* create_barrier_state(Arena* comp_arena) const;
#ifdef ASSERT
virtual void verify_gc_barriers(Compile* compile, CompilePhase phase) const;
+ static void verify_gc_barrier_assert(bool cond, const char* msg, uint8_t bd, Node* n);
#endif
! int estimate_stub_size() const /* override */;
! void emit_stubs(CodeBuffer& cb) const /* override */;
// Late (post-matching) barrier analysis: first computes register liveness at
// the stub sites, then elides barriers dominated by equivalent ones.
+ void late_barrier_analysis() const /* override*/ {
+ compute_liveness_at_stubs();
+ analyze_dominating_barriers();
+ }
+
+ void elide_dominated_barrier(MachNode* mach) const;
+ void analyze_dominating_barriers() const;
+
+ virtual uint estimated_barrier_size(const Node* node) const;
+
+ static void print_barrier_data(outputStream* os, uint8_t data);
+ };
+
// Abstract base for Shenandoah's out-of-line barrier stubs (load, store,
// CAS). Construction asserts that stubs are never created when barriers are
// disabled; concrete subclasses implement emit_code().
+ class ShenandoahBarrierStubC2 : public BarrierStubC2 {
+ protected:
+ explicit ShenandoahBarrierStubC2(const MachNode* node) : BarrierStubC2(node) {
+ assert(!ShenandoahSkipBarriers, "Do not touch stubs when disabled");
+ }
+ void register_stub();
// True when the access targets the heap (ShenandoahBitNative not set).
+ static bool is_heap_access(const MachNode* node) {
+ return (node->barrier_data() & ShenandoahBitNative) == 0;
+ }
// Shared emission helpers used by the concrete stubs; platform-specific
// implementations are elsewhere.
+ void satb(MacroAssembler* masm, ShenandoahBarrierStubC2* stub, Register scratch1, Register scratch2, Register scratch3, Label* L_done);
+ void lrb(MacroAssembler* masm, ShenandoahBarrierStubC2* stub, Register obj, Register addr, Label* L_done, bool narrow);
+ static Register select_temp_register(Address addr, Register reg1 = noreg, Register reg2 = noreg);
+
+ public:
+ virtual void emit_code(MacroAssembler& masm) = 0;
+ };
+
// Out-of-line stub for a load: emits the load-reference barrier (strong,
// weak or phantom) and/or the keep-alive barrier, as selected by the node's
// barrier-data bits (decoded by the static predicates below).
+ class ShenandoahLoadBarrierStubC2 : public ShenandoahBarrierStubC2 {
+ Register const _dst;
+ Register _addr_reg; // Used on x64
+ Address const _src; // Used on aarch64
+ const bool _narrow;
+ const bool _maybe_null;
+ const bool _needs_load_ref_barrier;
+ const bool _needs_keep_alive_barrier;
+
// Private; instances are created via the factory methods below.
+ ShenandoahLoadBarrierStubC2(const MachNode* node, Register dst, Register addr_reg, Address src) :
+ ShenandoahBarrierStubC2(node), _dst(dst), _addr_reg(addr_reg), _src(src), _narrow(is_narrow_result(node)),
+ _maybe_null(!src_not_null(node)), _needs_load_ref_barrier(needs_load_ref_barrier(node)),
+ _needs_keep_alive_barrier(needs_keep_alive_barrier(node)) {
+ assert(!_narrow || is_heap_access(node), "Only heap accesses can be narrow");
+ }
+
+ public:
// Predicates over the node's barrier-data byte.
+ static bool needs_barrier(const MachNode* node) {
+ return needs_load_ref_barrier(node) || needs_keep_alive_barrier(node);
+ }
+ static bool needs_keep_alive_barrier(const MachNode* node) {
+ return (node->barrier_data() & ShenandoahBitKeepAlive) != 0;
+ }
+ static bool needs_load_ref_barrier(const MachNode* node) {
+ return (node->barrier_data() & (ShenandoahBitStrong | ShenandoahBitWeak | ShenandoahBitPhantom)) != 0;
+ }
+ static bool needs_load_ref_barrier_weak(const MachNode* node) {
+ return (node->barrier_data() & (ShenandoahBitWeak | ShenandoahBitPhantom)) != 0;
+ }
+ static bool src_not_null(const MachNode* node) {
+ return (node->barrier_data() & ShenandoahBitNotNull) != 0;
+ }
// True when the loaded value is a compressed (narrow) oop.
+ static bool is_narrow_result(const MachNode* node) {
+ return node->bottom_type()->isa_narrowoop();
+ }
+
// Factories: one per addressing form (Address on one platform, Register on
// the other — see the member comments above).
+ static ShenandoahLoadBarrierStubC2* create(const MachNode* node, Register dst, Address src);
+ static ShenandoahLoadBarrierStubC2* create(const MachNode* node, Register dst, Register addr);
+
+ void emit_code(MacroAssembler& masm) override;
+ };
// Out-of-line stub for a store: emits the card-mark barrier and/or the
// keep-alive (SATB) barrier, as selected by the node's barrier-data bits.
! class ShenandoahStoreBarrierStubC2 : public ShenandoahBarrierStubC2 {
! Register const _addr_reg; // Used on aarch64
! Address const _dst; // Used on x64
+ Register const _src;
+ Register const _tmp;
+ const bool _dst_narrow;
+ const bool _src_narrow;
// Private; instances are created via the factory methods below.
! ShenandoahStoreBarrierStubC2(const MachNode* node, Register addr_reg, Address dst, bool dst_narrow, Register src, bool src_narrow, Register tmp) :
! ShenandoahBarrierStubC2(node), _addr_reg(addr_reg), _dst(dst), _src(src), _tmp(tmp), _dst_narrow(dst_narrow), _src_narrow(src_narrow) {
+ assert(!_dst_narrow || is_heap_access(node), "Only heap accesses can be narrow");
+ }
+
+ public:
// Predicates over the node's barrier-data byte.
+ static bool needs_barrier(const MachNode* node) {
+ return needs_card_barrier(node) || needs_keep_alive_barrier(node);
+ }
+ static bool needs_keep_alive_barrier(const MachNode* node) {
+ return (node->barrier_data() & ShenandoahBitKeepAlive) != 0;
+ }
+ static bool needs_card_barrier(const MachNode* node) {
+ return (node->barrier_data() & ShenandoahBitCardMark) != 0;
+ }
+ static bool src_not_null(const MachNode* node) {
+ return (node->barrier_data() & ShenandoahBitNotNull) != 0;
+ }
+
// Factories: one per addressing form (see the member comments above).
+ static ShenandoahStoreBarrierStubC2* create(const MachNode* node, Address dst, bool dst_narrow, Register src, bool src_narrow, Register tmp);
+ static ShenandoahStoreBarrierStubC2* create(const MachNode* node, Register addr, bool dst_narrow);
+
+ void emit_code(MacroAssembler& masm) override;
};
// Out-of-line stub for a compare-and-swap: may emit card-mark, strong LRB
// and/or keep-alive barriers. Carries the full CAS configuration (compare-
// and-exchange vs. boolean result, ordering, weak/strong, narrow oops).
+ class ShenandoahCASBarrierStubC2 : public ShenandoahBarrierStubC2 {
+ Register _addr_reg; // Used on aarch64
+ Address _addr; // Used on x64
+ Register _expected;
+ Register _new_val;
+ Register _result;
+ Register _tmp1;
+ Register _tmp2;
+ bool const _narrow;
// _cae: presumably "compare-and-exchange" (returns old value) rather than
// boolean CAS — confirm against the platform emit_code implementations.
+ bool const _cae;
+ bool const _maybe_null;
+ bool const _acquire;
+ bool const _release;
+ bool const _weak;
+
// Private; instances are created via the factory methods below.
+ explicit ShenandoahCASBarrierStubC2(const MachNode* node, Register addr_reg, Address addr, Register expected, Register new_val, Register result, Register tmp1, Register tmp2, bool narrow, bool cae, bool maybe_null, bool acquire, bool release, bool weak) :
+ ShenandoahBarrierStubC2(node),
+ _addr_reg(addr_reg), _addr(addr), _expected(expected), _new_val(new_val), _result(result), _tmp1(tmp1), _tmp2(tmp2), _narrow(narrow), _cae(cae), _maybe_null(maybe_null), _acquire(acquire), _release(release), _weak(weak) {
+ assert(!_narrow || is_heap_access(node), "Only heap accesses can be narrow");
+ }
+
+ public:
// Predicates over the node's barrier-data byte. Note: for CAS, the LRB
// predicate tests only the strong bit.
+ static bool needs_barrier(const MachNode* node) {
+ return needs_card_barrier(node) || needs_load_ref_barrier(node) || needs_keep_alive_barrier(node);
+ }
+ static bool needs_keep_alive_barrier(const MachNode* node) {
+ return (node->barrier_data() & ShenandoahBitKeepAlive) != 0;
+ }
+ static bool needs_card_barrier(const MachNode* node) {
+ return (node->barrier_data() & ShenandoahBitCardMark) != 0;
+ }
+ static bool needs_load_ref_barrier(const MachNode* node) {
+ return (node->barrier_data() & ShenandoahBitStrong) != 0;
+ }
+
// Factories: one per addressing form (see the member comments above).
+ static ShenandoahCASBarrierStubC2* create(const MachNode* node, Register addr, Register expected, Register new_val, Register result, bool narrow, bool cae, bool maybe_null, bool acquire, bool release, bool weak);
+ static ShenandoahCASBarrierStubC2* create(const MachNode* node, Address addr, Register expected, Register new_val, Register result, Register tmp1, Register tmp2, bool narrow, bool cae);
+ void emit_code(MacroAssembler& masm) override;
+ };
#endif // SHARE_GC_SHENANDOAH_C2_SHENANDOAHBARRIERSETC2_HPP
< prev index next >