1 /*
2 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "ci/ciInlineKlass.hpp"
26 #include "gc/shared/c1/cardTableBarrierSetC1.hpp"
27 #include "gc/shared/cardTable.hpp"
28 #include "gc/shared/cardTableBarrierSet.hpp"
29 #include "gc/shared/gc_globals.hpp"
30 #include "utilities/macros.hpp"
31
32 #ifdef ASSERT
33 #define __ gen->lir(__FILE__, __LINE__)->
34 #else
35 #define __ gen->lir()->
36 #endif
37
38 void CardTableBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
39 DecoratorSet decorators = access.decorators();
40 bool is_array = (decorators & IS_ARRAY) != 0;
41 bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
42
43 // Is this a flat, atomic access that might require gc barriers on oop fields?
44 ciInlineKlass* vk = access.vk();
45 if (vk != nullptr && vk->has_object_fields()) {
46 // Add pre-barriers for oop fields
47 for (int i = 0; i < vk->nof_nonstatic_fields(); i++) {
48 ciField* field = vk->nonstatic_field_at(i);
49 if (!field->type()->is_primitive_type()) {
50 int off = access.offset().opr().as_jint() + field->offset_in_bytes() - vk->payload_offset();
51 LIRAccess inner_access(access.gen(), decorators, access.base(), LIR_OprFact::intConst(off), field->type()->basic_type(), access.patch_emit_info(), access.access_emit_info());
52 pre_barrier(inner_access, resolve_address(inner_access, false),
53 LIR_OprFact::illegalOpr /* pre_val */, inner_access.patch_emit_info());
54 }
55 }
56 }
57
58 if (access.is_oop()) {
59 pre_barrier(access, access.resolved_addr(),
60 LIR_OprFact::illegalOpr /* pre_val */, access.patch_emit_info());
61 }
62
63 BarrierSetC1::store_at_resolved(access, value);
64
65 if (access.is_oop()) {
66 bool precise = is_array || on_anonymous;
67 LIR_Opr post_addr = precise ? access.resolved_addr() : access.base().opr();
68 post_barrier(access, post_addr, value);
69 }
70
71 if (vk != nullptr && vk->has_object_fields()) {
72 // Add post-barriers for oop fields
73 for (int i = 0; i < vk->nof_nonstatic_fields(); i++) {
74 ciField* field = vk->nonstatic_field_at(i);
75 if (!field->type()->is_primitive_type()) {
76 int inner_off = field->offset_in_bytes() - vk->payload_offset();
77 int off = access.offset().opr().as_jint() + inner_off;
78 LIRAccess inner_access(access.gen(), decorators, access.base(), LIR_OprFact::intConst(off), field->type()->basic_type(), access.patch_emit_info(), access.access_emit_info());
79
80 // Shift long value to extract the narrow oop field value and zero-extend
81 LIR_Opr field_val = access.gen()->new_register(T_LONG);
82 access.gen()->lir()->unsigned_shift_right(value,
83 LIR_OprFact::intConst(inner_off << LogBitsPerByte),
84 field_val, LIR_Opr::illegalOpr());
85 LIR_Opr mask = access.gen()->load_immediate((julong) max_juint, T_LONG);
86 access.gen()->lir()->logical_and(field_val, mask, field_val);
87 LIR_Opr oop_val = access.gen()->new_register(T_OBJECT);
88 access.gen()->lir()->move(field_val, oop_val);
89
90 assert(!is_array && !on_anonymous, "not suppported");
91 post_barrier(inner_access, access.base().opr(), oop_val);
92 }
93 }
94 }
95 }
96
97 LIR_Opr CardTableBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
98 if (access.is_oop()) {
99 pre_barrier(access, access.resolved_addr(),
100 LIR_OprFact::illegalOpr /* pre_val */, nullptr);
101 }
102
103 LIR_Opr result = BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
104
105 if (access.is_oop()) {
106 post_barrier(access, access.resolved_addr(), new_value.result());
107 }
108
109 return result;
110 }
111
112 LIR_Opr CardTableBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
113 if (access.is_oop()) {
114 pre_barrier(access, access.resolved_addr(),
115 LIR_OprFact::illegalOpr /* pre_val */, nullptr);
116 }
117
118 LIR_Opr result = BarrierSetC1::atomic_xchg_at_resolved(access, value);
119
120 if (access.is_oop()) {
121 post_barrier(access, access.resolved_addr(), value.result());
122 }
123
124 return result;
125 }
126
127 // This overrides the default to resolve the address into a register,
128 // assuming it will be used by a write barrier anyway.
129 LIR_Opr CardTableBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
130 DecoratorSet decorators = access.decorators();
131 bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
132 bool is_write = (decorators & ACCESS_WRITE) != 0;
133 bool is_array = (decorators & IS_ARRAY) != 0;
134 bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
135 bool precise = is_array || on_anonymous;
136 resolve_in_register |= !needs_patching && is_write && access.is_oop() && precise;
137 return BarrierSetC1::resolve_address(access, resolve_in_register);
138 }
139
// Emit the card-marking post-barrier for a store at 'addr': compute the
// card covering the address (addr >> card_shift) and store the dirty value
// into the card table. 'new_val' is not consulted by a plain card-table
// barrier (no value-based filtering here).
void CardTableBarrierSetC1::post_barrier(LIRAccess& access, LIR_Opr addr, LIR_Opr new_val) {
  DecoratorSet decorators = access.decorators();
  LIRGenerator* gen = access.gen();
  bool in_heap = (decorators & IN_HEAP) != 0;
  if (!in_heap) {
    // Off-heap (native) stores never need card marks.
    return;
  }

  BarrierSet* bs = BarrierSet::barrier_set();
  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
  CardTable* ct = ctbs->card_table();
  // The byte map base is baked into the code as a constant, which is only
  // valid for collectors whose card table never moves.
  LIR_Const* card_table_base = new LIR_Const(ct->byte_map_base());
  SHENANDOAHGC_ONLY(assert(!UseShenandoahGC, "Shenandoah byte_map_base is not constant.");)

  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    // ptr cannot be an object because we use this barrier for array card marks
    // and addr can point in the middle of an array.
    LIR_Opr ptr = gen->new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      // Plain base register: a simple move suffices.
      __ move(address->base(), ptr);
    } else {
      // Materialize base+index+disp into a single register via lea.
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

#ifdef CARDTABLEBARRIERSET_POST_BARRIER_HELPER
  // Platform provides its own card-mark sequence (e.g. special addressing).
  gen->CardTableBarrierSet_post_barrier_helper(addr, card_table_base);
#else
  LIR_Opr tmp = gen->new_pointer_register();
  if (two_operand_lir_form) {
    // Two-operand ISAs (x86): copy the address into tmp first, then shift
    // in place, since shift would otherwise clobber its source.
    LIR_Opr addr_opr = LIR_OprFact::address(new LIR_Address(addr, addr->type()));
    __ leal(addr_opr, tmp);
    __ unsigned_shift_right(tmp, CardTable::card_shift(), tmp);
  } else {
    __ unsigned_shift_right(addr, CardTable::card_shift(), tmp);
  }

  // Card address = card_table_base + (addr >> card_shift), either as an
  // immediate displacement or via a register holding the base.
  LIR_Address* card_addr;
  if (gen->can_inline_as_constant(card_table_base)) {
    card_addr = new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE);
  } else {
    card_addr = new LIR_Address(tmp, gen->load_constant(card_table_base), T_BYTE);
  }

  LIR_Opr dirty = LIR_OprFact::intConst(CardTable::dirty_card_val());
  if (UseCondCardMark) {
    // Load the card first and skip the store if it is already dirty —
    // avoids write traffic/false sharing on heavily-stored cards.
    LIR_Opr cur_value = gen->new_register(T_INT);
    __ move(card_addr, cur_value);

    LabelObj* L_already_dirty = new LabelObj();
    __ cmp(lir_cond_equal, cur_value, dirty);
    __ branch(lir_cond_equal, L_already_dirty->label());
    __ move(dirty, card_addr);
    __ branch_destination(L_already_dirty->label());
  } else {
    // Unconditional card mark.
    __ move(dirty, card_addr);
  }
#endif
}