29 #include "gc/shared/gc_globals.hpp"
30 #include "opto/arraycopynode.hpp"
31 #include "opto/graphKit.hpp"
32 #include "opto/idealKit.hpp"
33 #include "opto/macro.hpp"
34 #include "utilities/macros.hpp"
35
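// Shorthand for the IdealKit-based barrier emission below: "__ name(...)"
// expands to "ideal.name(...)" on the locally declared IdealKit.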
#define __ ideal.

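// Post barrier for oop stores into the heap: dirty the card covering the
// destination so a generational GC can find potential old-to-young pointers
// by scanning the card table. The barrier is skipped when the stored value
// is not an oop, when the store is tightly coupled with the enclosing
// allocation, or when the access is neither in-heap nor an unknown-oop-ref
// access. With use_precise (arrays and unknown-oop-ref stores) the card of
// the exact destination address is dirtied rather than the card covering
// the object header.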
Node* CardTableBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();

  Node* adr = access.addr().node();

  bool is_array = (decorators & IS_ARRAY) != 0;
  bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool use_precise = is_array || anonymous;
  bool tightly_coupled_alloc = (decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0;

  if (!access.is_oop() || tightly_coupled_alloc || (!in_heap && !anonymous)) {
    return BarrierSetC2::store_at_resolved(access, val);
  }

  assert(access.is_parse_access(), "entry not supported at optimization time");
  C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);

  Node* store = BarrierSetC2::store_at_resolved(access, val);
  post_barrier(parse_access.kit(), access.base(), adr, val.node(), use_precise);

  return store;
}

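// Oop compare-and-exchange: the post barrier is emitted unconditionally and
// with precise card marking, since the CAS may have installed a new oop at
// this address and its success is not known at compile time.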
Node* CardTableBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                            Node* new_val, const Type* value_type) const {
  if (!access.is_oop()) {
    return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
  }

  Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);

  post_barrier(access.kit(), access.base(), access.addr().node(), new_val, true);

  return result;
}

Node* CardTableBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                             Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
    // no_ctrl, but that doesn't buy much latitude.
    Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, T_BYTE, adr_type);
    __ if_then(card_val, BoolTest::ne, dirty);
  }

  // Smash dirty value into card
  __ store(__ ctrl(), card_adr, dirty, T_BYTE, adr_type, MemNode::unordered);

  if (UseCondCardMark) {
    __ end_if();
  }

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

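// With -XX:+ReduceInitialCardMarks, card marks for stores that initialize a
// freshly allocated object can be elided; the runtime is expected to dirty
// the cards covering such objects later (deferred card marks) instead of the
// compiled code marking them store by store.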
bool CardTableBarrierSetC2::use_ReduceInitialCardMarks() {
  return ReduceInitialCardMarks;
}

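// Called when an allocation and its barriers are eliminated (e.g. for a
// non-escaping object during macro expansion): the card-mark subgraph hangs
// off the CastP2X that turns the oop into a word for computing the card
// address, so the card store is dropped and, under UseCondCardMark, the
// guarding card load is replaced by a constant so the test folds away.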
void CardTableBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
  assert(node->Opcode() == Op_CastP2X, "ConvP2XNode required");
  Node *shift = node->unique_out();
  Node *addp = shift->unique_out();
  for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
    Node *mem = addp->last_out(j);
    if (UseCondCardMark && mem->is_Load()) {
      assert(mem->Opcode() == Op_LoadB, "unexpected code shape");
      // The load is checking if the card has been written so
      // replace it with zero to fold the test.
      macro->replace_node(mem, macro->intcon(0));
      continue;
    }
    assert(mem->is_Store(), "store required");
    macro->replace_node(mem, mem->in(MemNode::Memory));
  }
}

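// An arraycopy of oops needs card marks for the destination unless it writes
// into a tightly coupled allocation and ReduceInitialCardMarks allows those
// initializing stores to go without card marks.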
bool CardTableBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, bool is_clone_instance, ArrayCopyPhase phase) const {
  bool is_oop = is_reference_type(type);
  return is_oop && (!tightly_coupled_alloc || !use_ReduceInitialCardMarks());
}

29 #include "gc/shared/gc_globals.hpp"
30 #include "opto/arraycopynode.hpp"
31 #include "opto/graphKit.hpp"
32 #include "opto/idealKit.hpp"
33 #include "opto/macro.hpp"
34 #include "utilities/macros.hpp"
35
36 #define __ ideal.
37
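// Post barrier for oop stores into the heap, as in the variant above, with
// one addition: when a flattened value object (InlineTypeNode) is being
// stored, the barrier path is always taken, because each oop field inside
// the flat payload needs its own card mark.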
Node* CardTableBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();

  Node* adr = access.addr().node();

  bool is_array = (decorators & IS_ARRAY) != 0;
  bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool use_precise = is_array || anonymous;
  bool tightly_coupled_alloc = (decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0;

  const InlineTypeNode* vt = nullptr;
  if (access.is_parse_access() && static_cast<C2ParseAccess&>(access).vt() != nullptr) {
    vt = static_cast<C2ParseAccess&>(access).vt();
  }

  if (vt == nullptr && (!access.is_oop() || tightly_coupled_alloc || (!in_heap && !anonymous))) {
    return BarrierSetC2::store_at_resolved(access, val);
  }

  assert(access.is_parse_access(), "entry not supported at optimization time");
  C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);

  Node* store = BarrierSetC2::store_at_resolved(access, val);
  // TODO 8350865
  // - We actually only need the post barrier once for non-arrays (same for C1, right)?
  // - Value is only needed to determine if we are storing null. Maybe we can go with a simple boolean?
  GraphKit* kit = parse_access.kit();
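  // For a flattened value object store, emit a post barrier for every
  // non-primitive (oop) field of the value: each field lives at its own
  // offset within the payload, so each gets its own card mark.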
  if (vt != nullptr) {
    for (uint i = 0; i < vt->field_count(); ++i) {
      ciType* type = vt->field_type(i);
      if (!type->is_primitive_type()) {
        ciInlineKlass* vk = vt->bottom_type()->inline_klass();
        int field_offset = vt->field_offset(i) - vk->payload_offset();
        Node* value = vt->field_value(i);
        Node* field_adr = kit->basic_plus_adr(access.base(), adr, field_offset);
        post_barrier(kit, access.base(), field_adr, value, use_precise);
      }
    }
  } else {
    post_barrier(kit, access.base(), adr, val.node(), use_precise);
  }

  return store;
}

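// Oop CAS: the new value may have been written, so always emit a precise
// post barrier for the target address.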
Node* CardTableBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                            Node* new_val, const Type* value_type) const {
  if (!access.is_oop()) {
    return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
  }

  Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);

  post_barrier(access.kit(), access.base(), access.addr().node(), new_val, true);

  return result;
}

Node* CardTableBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                             Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
    // no_ctrl, but that doesn't buy much latitude.
    Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, T_BYTE, adr_type);
    __ if_then(card_val, BoolTest::ne, dirty);
  }

  // Smash dirty value into card
  __ store(__ ctrl(), card_adr, dirty, T_BYTE, adr_type, MemNode::unordered);

  if (UseCondCardMark) {
    __ end_if();
  }

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

bool CardTableBarrierSetC2::use_ReduceInitialCardMarks() {
  return ReduceInitialCardMarks;
}

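// Variant of card-mark elimination that works through the IGVN handle and
// allows the CastP2X to feed several shift/AddP chains (for instance when
// multiple card marks were emitted for the oop fields of a flattened value):
// every card load and card store found below it is removed.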
void CardTableBarrierSetC2::eliminate_gc_barrier(PhaseIterGVN* igvn, Node* node) const {
  assert(node->Opcode() == Op_CastP2X, "ConvP2XNode required");
  for (DUIterator_Last imin, i = node->last_outs(imin); i >= imin; --i) {
    Node* shift = node->last_out(i);
    for (DUIterator_Last jmin, j = shift->last_outs(jmin); j >= jmin; --j) {
      Node* addp = shift->last_out(j);
      for (DUIterator_Last kmin, k = addp->last_outs(kmin); k >= kmin; --k) {
        Node* mem = addp->last_out(k);
        if (UseCondCardMark && mem->is_Load()) {
          assert(mem->Opcode() == Op_LoadB, "unexpected code shape");
          // The load is checking if the card has been written so
          // replace it with zero to fold the test.
          igvn->replace_node(mem, igvn->intcon(0));
          continue;
        }
        assert(mem->is_Store(), "store required");
        igvn->replace_node(mem, mem->in(MemNode::Memory));
      }
    }
  }
}

bool CardTableBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, bool is_clone_instance, ArrayCopyPhase phase) const {
  bool is_oop = type == T_OBJECT || type == T_ARRAY;
  return is_oop && (!tightly_coupled_alloc || !use_ReduceInitialCardMarks());
}