1 /*
2 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
30 #include "gc/shared/gc_globals.hpp"
31 #include "opto/arraycopynode.hpp"
32 #include "opto/graphKit.hpp"
33 #include "opto/idealKit.hpp"
34 #include "opto/macro.hpp"
35 #include "utilities/macros.hpp"
36
37 #define __ ideal.
38
39 Node* CardTableBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
40 DecoratorSet decorators = access.decorators();
41
42 Node* adr = access.addr().node();
43
44 bool is_array = (decorators & IS_ARRAY) != 0;
45 bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
46 bool in_heap = (decorators & IN_HEAP) != 0;
47 bool use_precise = is_array || anonymous;
48 bool tightly_coupled_alloc = (decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0;
49
50 if (!access.is_oop() || tightly_coupled_alloc || (!in_heap && !anonymous)) {
51 return BarrierSetC2::store_at_resolved(access, val);
52 }
53
54 assert(access.is_parse_access(), "entry not supported at optimization time");
55 C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
56
57 Node* store = BarrierSetC2::store_at_resolved(access, val);
58 post_barrier(parse_access.kit(), access.base(), adr, val.node(), use_precise);
59
60 return store;
61 }
62
63 Node* CardTableBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
64 Node* new_val, const Type* value_type) const {
65 if (!access.is_oop()) {
66 return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
67 }
68
69 Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
70
71 post_barrier(access.kit(), access.base(), access.addr().node(), new_val, true);
72
73 return result;
74 }
75
Node* CardTableBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                             Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
    // no_ctrl, but that doesn't buy much latitude.
    Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, T_BYTE, adr_type);
    // Only fall into the store below when the card is not already dirty, so a
    // conditional card mark avoids redundant writes to the card table.
    __ if_then(card_val, BoolTest::ne, dirty);
  }

  // Smash dirty value into card
  __ store(__ ctrl(), card_adr, dirty, T_BYTE, adr_type, MemNode::unordered);

  if (UseCondCardMark) {
    // Closes the card_val != dirty test opened above.
    __ end_if();
  }

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}
209
// Mirrors the ReduceInitialCardMarks VM flag: when true, card marks for
// stores into newly allocated objects (tightly coupled allocations) may be
// elided or deferred instead of being emitted eagerly.
bool CardTableBarrierSetC2::use_ReduceInitialCardMarks() {
  return ReduceInitialCardMarks;
}
213
// Remove a card-mark barrier rooted at 'node' during macro expansion.
// 'node' is the CastP2X that converts the stored-to object address into a raw
// word for the card address computation; its unique user chain is expected to
// be a shift (presumably address >> card_shift — confirm against post_barrier)
// feeding an AddP that forms the card table slot address.
void CardTableBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
  assert(node->Opcode() == Op_CastP2X, "ConvP2XNode required");
  Node *shift = node->unique_out();
  Node *addp = shift->unique_out();
  // Detach every memory node hanging off the card address.
  for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
    Node *mem = addp->last_out(j);
    if (UseCondCardMark && mem->is_Load()) {
      assert(mem->Opcode() == Op_LoadB, "unexpected code shape");
      // The load is checking if the card has been written so
      // replace it with zero to fold the test.
      macro->replace_node(mem, macro->intcon(0));
      continue;
    }
    assert(mem->is_Store(), "store required");
    // Bypass the card store: wire its users to its incoming memory state,
    // which removes the card mark from the graph.
    macro->replace_node(mem, mem->in(MemNode::Memory));
  }
}
231
232 bool CardTableBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, bool is_clone_instance, ArrayCopyPhase phase) const {
233 bool is_oop = is_reference_type(type);
234 return is_oop && (!tightly_coupled_alloc || !use_ReduceInitialCardMarks());
235 }
|
1 /*
2 * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
30 #include "gc/shared/gc_globals.hpp"
31 #include "opto/arraycopynode.hpp"
32 #include "opto/graphKit.hpp"
33 #include "opto/idealKit.hpp"
34 #include "opto/macro.hpp"
35 #include "utilities/macros.hpp"
36
37 #define __ ideal.
38
// Emit a store and, when required, card-mark post barriers. For flattened
// inline-type (value object) stores, a barrier is emitted per contained oop
// field rather than once for the whole payload.
Node* CardTableBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();

  Node* adr = access.addr().node();

  bool is_array = (decorators & IS_ARRAY) != 0;
  bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool in_heap = (decorators & IN_HEAP) != 0;
  // Arrays and unknown (anonymous) oop references need a precise card mark.
  bool use_precise = is_array || anonymous;
  bool tightly_coupled_alloc = (decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0;

  // A non-null vt means we are storing a flattened inline type; that case
  // always takes the barrier path below, regardless of the other decorators.
  const InlineTypeNode* vt = nullptr;
  if (access.is_parse_access() && static_cast<C2ParseAccess&>(access).vt() != nullptr) {
    vt = static_cast<C2ParseAccess&>(access).vt();
  }

  if (vt == nullptr && (!access.is_oop() || tightly_coupled_alloc || (!in_heap && !anonymous))) {
    return BarrierSetC2::store_at_resolved(access, val);
  }

  assert(access.is_parse_access(), "entry not supported at optimization time");
  C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);

  Node* store = BarrierSetC2::store_at_resolved(access, val);
  // TODO 8350865
  // - We actually only need the post barrier once for non-arrays (same for C1, right)?
  // - Value is only needed to determine if we are storing null. Maybe we can go with a simple boolean?
  GraphKit* kit = parse_access.kit();
  if (vt != nullptr) {
    // One post barrier per non-primitive field of the flattened value: the
    // field address is the payload address plus the field's offset relative
    // to the payload start.
    for (uint i = 0; i < vt->field_count(); ++i) {
      ciField* field = vt->field(i);
      ciType* type = field->type();
      if (!type->is_primitive_type()) {
        ciInlineKlass* vk = vt->bottom_type()->inline_klass();
        int field_offset = field->offset_in_bytes() - vk->payload_offset();
        Node* value = vt->field_value(i);
        Node* field_adr = kit->basic_plus_adr(access.base(), adr, field_offset);
        post_barrier(kit, access.base(), field_adr, value, use_precise);
      }
    }
  } else {
    // Plain oop store: a single barrier on the stored-to address.
    post_barrier(kit, access.base(), adr, val.node(), use_precise);
  }

  return store;
}
85
86 Node* CardTableBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
87 Node* new_val, const Type* value_type) const {
88 if (!access.is_oop()) {
89 return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
90 }
91
92 Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
93
94 post_barrier(access.kit(), access.base(), access.addr().node(), new_val, true);
95
96 return result;
97 }
98
Node* CardTableBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                             Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
    // no_ctrl, but that doesn't buy much latitude.
    Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, T_BYTE, adr_type);
    // Only fall into the store below when the card is not already dirty, so a
    // conditional card mark avoids redundant writes to the card table.
    __ if_then(card_val, BoolTest::ne, dirty);
  }

  // Smash dirty value into card
  __ store(__ ctrl(), card_adr, dirty, T_BYTE, adr_type, MemNode::unordered);

  if (UseCondCardMark) {
    // Closes the card_val != dirty test opened above.
    __ end_if();
  }

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}
232
// Mirrors the ReduceInitialCardMarks VM flag: when true, card marks for
// stores into newly allocated objects (tightly coupled allocations) may be
// elided or deferred instead of being emitted eagerly.
bool CardTableBarrierSetC2::use_ReduceInitialCardMarks() {
  return ReduceInitialCardMarks;
}
236
// Remove the card-mark barrier(s) rooted at 'node'. 'node' is the CastP2X
// that converts the stored-to object address to a raw word for the card
// address computation. Unlike the macro-expansion variant, every user is
// iterated at each level — a single CastP2X can feed several shift/AddP
// chains (presumably one per flattened oop field — confirm against the
// store_at_resolved barrier loop above).
void CardTableBarrierSetC2::eliminate_gc_barrier(PhaseIterGVN* igvn, Node* node) const {
  assert(node->Opcode() == Op_CastP2X, "ConvP2XNode required");
  for (DUIterator_Last imin, i = node->last_outs(imin); i >= imin; --i) {
    Node* shift = node->last_out(i);
    for (DUIterator_Last jmin, j = shift->last_outs(jmin); j >= jmin; --j) {
      Node* addp = shift->last_out(j);
      // Detach every memory node hanging off this card address.
      for (DUIterator_Last kmin, k = addp->last_outs(kmin); k >= kmin; --k) {
        Node* mem = addp->last_out(k);
        if (UseCondCardMark && mem->is_Load()) {
          assert(mem->Opcode() == Op_LoadB, "unexpected code shape");
          // The load is checking if the card has been written so
          // replace it with zero to fold the test.
          igvn->replace_node(mem, igvn->intcon(0));
          continue;
        }
        assert(mem->is_Store(), "store required");
        // Bypass the card store: wire its users to its incoming memory
        // state, which removes the card mark from the graph.
        igvn->replace_node(mem, mem->in(MemNode::Memory));
      }
    }
  }
}
258
259 bool CardTableBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, bool is_clone_instance, ArrayCopyPhase phase) const {
260 bool is_oop = type == T_OBJECT || type == T_ARRAY;
261 return is_oop && (!tightly_coupled_alloc || !use_ReduceInitialCardMarks());
262 }
|