66 return barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
67 }
68
// Pre-write hook for a reference field store. Card table barriers do all
// their work after the store, so this is intentionally a no-op.
template <DecoratorSet decorators, typename T>
inline void write_ref_field_pre(T* addr) {}

// Record a reference update. Note that these versions are precise!
// The scanning code has to handle the fact that the write barrier may be
// either precise or imprecise. We make non-virtual inline variants of
// these functions here for performance.
template <DecoratorSet decorators, typename T>
inline void write_ref_field_post(T *addr);
78
// Causes all refs in "mr" to be assumed to be modified (by this JavaThread).
virtual void write_region(MemRegion mr);

// Operations on arrays, or general regions (e.g., for "clone") may be
// optimized by some barriers.

// Below length is the # array elements being written.
// No pre-barrier work is required for card marking, so both overloads
// are no-ops regardless of dest_uninitialized.
virtual void write_ref_array_pre(oop* dst, size_t length,
                                 bool dest_uninitialized) {}
virtual void write_ref_array_pre(narrowOop* dst, size_t length,
                                 bool dest_uninitialized) {}
// Below count is the # array elements being written, starting
// at the address "start", which may not necessarily be HeapWord-aligned
inline void write_ref_array(HeapWord* start, size_t count);
93
// Current card table (const and non-const overloads). The relaxed load
// suggests _card_table may be replaced concurrently;
// NOTE(review): confirm the store side's ordering requirements elsewhere.
CardTable* card_table() { return _card_table.load_relaxed(); }
CardTable* card_table() const { return _card_table.load_relaxed(); }
96
97 CardValue* card_table_base_const() const {
98 assert(UseSerialGC || UseParallelGC, "Only these GCs have constant card table base");
99 return card_table()->byte_map_base();
100 }
101
// Hook invoked when a JavaThread leaves a slow-path allocation;
// presumably lets the barrier set post-process new_obj — TODO confirm
// against the .cpp implementation.
virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj);

virtual void print_on(outputStream* st) const;

// The AOT code cache manager needs to know the region grain size
// shift for some barrier sets. A plain card table has no region
// granularity, hence the 0 default here.
virtual uint grain_shift() { return 0; }
109
// Access API dispatch for card-table collectors. Derives from the raw
// (barrier-free) implementation; the *_in_heap entry points declared here
// add the card-marking work in their out-of-line definitions.
template <DecoratorSet decorators, typename BarrierSetT = CardTableBarrierSet>
class AccessBarrier: public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
  // Shorthand for the unadorned base implementation.
  typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;

public:
  // Heap oop store / atomic update primitives; defined out of line.
  template <typename T>
  static void oop_store_in_heap(T* addr, oop value);
  template <typename T>
  static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value);
  template <typename T>
  static oop oop_atomic_xchg_in_heap(T* addr, oop new_value);

  template <typename T>
  static OopCopyResult oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                             arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                             size_t length);

  static void clone_in_heap(oop src, oop dst, size_t size);

  // (base, offset) convenience wrappers: resolve the field address, then
  // forward to the corresponding address-based primitive above.
  static void oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value) {
    oop_store_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), value);
  }

  static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
    return oop_atomic_xchg_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), new_value);
  }

  static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
    return oop_atomic_cmpxchg_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), compare_value, new_value);
  }
};
141 };
142
// Trait mapping the CardTableBarrierSet type to its BarrierSet::Name tag.
template<>
struct BarrierSet::GetName<CardTableBarrierSet> {
  static const BarrierSet::Name value = BarrierSet::CardTableBarrierSet;
};
147
// Inverse trait: maps the BarrierSet::Name tag back to the concrete type.
template<>
struct BarrierSet::GetType<BarrierSet::CardTableBarrierSet> {
  typedef ::CardTableBarrierSet type;
};
152
153 #endif // SHARE_GC_SHARED_CARDTABLEBARRIERSET_HPP
|
66 return barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
67 }
68
// Pre-write hook for a reference field store. Card table barriers do all
// their work after the store, so this is intentionally a no-op.
template <DecoratorSet decorators, typename T>
inline void write_ref_field_pre(T* addr) {}

// Record a reference update. Note that these versions are precise!
// The scanning code has to handle the fact that the write barrier may be
// either precise or imprecise. We make non-virtual inline variants of
// these functions here for performance.
template <DecoratorSet decorators, typename T>
inline void write_ref_field_post(T *addr);
78
// Causes all refs in "mr" to be assumed to be modified (by this JavaThread).
virtual void write_region(MemRegion mr);

// Operations on arrays, or general regions (e.g., for "clone") may be
// optimized by some barriers.

// Below length is the # array elements being written.
// No pre-barrier work is required for card marking, so both overloads
// are no-ops.
virtual void write_ref_array_pre(oop* dst, size_t length) {}
virtual void write_ref_array_pre(narrowOop* dst, size_t length) {}
// Below count is the # array elements being written, starting
// at the address "start", which may not necessarily be HeapWord-aligned
inline void write_ref_array(HeapWord* start, size_t count);
91
// Current card table (const and non-const overloads). The relaxed load
// suggests _card_table may be replaced concurrently;
// NOTE(review): confirm the store side's ordering requirements elsewhere.
CardTable* card_table() { return _card_table.load_relaxed(); }
CardTable* card_table() const { return _card_table.load_relaxed(); }
94
95 CardValue* card_table_base_const() const {
96 assert(UseSerialGC || UseParallelGC, "Only these GCs have constant card table base");
97 return card_table()->byte_map_base();
98 }
99
// Hook invoked when a JavaThread leaves a slow-path allocation;
// presumably lets the barrier set post-process new_obj — TODO confirm
// against the .cpp implementation.
virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj);

virtual void print_on(outputStream* st) const;

// The AOT code cache manager needs to know the region grain size
// shift for some barrier sets. A plain card table has no region
// granularity, hence the 0 default here.
virtual uint grain_shift() { return 0; }
107
// Access API dispatch for card-table collectors. Derives from the raw
// (barrier-free) implementation; the *_in_heap entry points declared here
// add the card-marking work in their out-of-line definitions.
template <DecoratorSet decorators, typename BarrierSetT = CardTableBarrierSet>
class AccessBarrier: public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
  // Shorthand for the unadorned base implementation.
  typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;

public:
  // Heap oop store / atomic update primitives; defined out of line.
  template <typename T>
  static void oop_store_in_heap(T* addr, oop value);
  template <typename T>
  static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value);
  template <typename T>
  static oop oop_atomic_xchg_in_heap(T* addr, oop new_value);

  template <typename T>
  static OopCopyResult oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                             arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                             size_t length);
private:
  // If a copy stops early (failed checkcast or null check), the elements
  // copied up to 'p' still need the post-barrier applied.
  template <typename T>
  static inline void oop_arraycopy_partial_barrier(BarrierSetT *bs, T* dst_raw, T* p);
public:

  static void clone_in_heap(oop src, oop dst, size_t size);

  // (base, offset) convenience wrappers: resolve the field address, then
  // forward to the corresponding address-based primitive above.
  static void oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value) {
    oop_store_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), value);
  }

  static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
    return oop_atomic_xchg_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), new_value);
  }

  static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
    return oop_atomic_cmpxchg_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), compare_value, new_value);
  }

  // Flat value (payload) operations; semantics depend on ValuePayload —
  // NOTE(review): confirm whether dst identifies a destination address
  // despite the const reference.
  static void value_copy_in_heap(const ValuePayload& src, const ValuePayload& dst);
  static void value_store_null_in_heap(const ValuePayload& dst);
};
147 };
148
// Trait mapping the CardTableBarrierSet type to its BarrierSet::Name tag.
template<>
struct BarrierSet::GetName<CardTableBarrierSet> {
  static const BarrierSet::Name value = BarrierSet::CardTableBarrierSet;
};
153
// Inverse trait: maps the BarrierSet::Name tag back to the concrete type.
template<>
struct BarrierSet::GetType<BarrierSet::CardTableBarrierSet> {
  typedef ::CardTableBarrierSet type;
};
158
159 #endif // SHARE_GC_SHARED_CARDTABLEBARRIERSET_HPP
|