1 /*
2 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "ci/ciInlineKlass.hpp"
26 #include "gc/shared/c1/cardTableBarrierSetC1.hpp"
27 #include "gc/shared/cardTable.hpp"
28 #include "gc/shared/cardTableBarrierSet.hpp"
29 #include "gc/shared/gc_globals.hpp"
30 #include "utilities/macros.hpp"
31
32 #ifdef ASSERT
33 #define __ gen->lir(__FILE__, __LINE__)->
34 #else
35 #define __ gen->lir()->
36 #endif
37
38 void CardTableBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
39 DecoratorSet decorators = access.decorators();
40 bool is_array = (decorators & IS_ARRAY) != 0;
41 bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
42
43 // Is this a flat, atomic access that might require gc barriers on oop fields?
44 ciInlineKlass* vk = access.vk();
45 if (vk != nullptr && vk->has_object_fields()) {
46 // Add pre-barriers for oop fields
47 for (int i = 0; i < vk->nof_nonstatic_fields(); i++) {
48 ciField* field = vk->nonstatic_field_at(i);
49 if (!field->type()->is_primitive_type()) {
50 int off = access.offset().opr().as_jint() + field->offset_in_bytes() - vk->payload_offset();
51 LIRAccess inner_access(access.gen(), decorators, access.base(), LIR_OprFact::intConst(off), field->type()->basic_type(), access.patch_emit_info(), access.access_emit_info());
52 pre_barrier(inner_access, resolve_address(inner_access, false),
53 LIR_OprFact::illegalOpr /* pre_val */, inner_access.patch_emit_info());
54 }
55 }
56 }
57
58 if (access.is_oop()) {
59 pre_barrier(access, access.resolved_addr(),
60 LIR_OprFact::illegalOpr /* pre_val */, access.patch_emit_info());
61 }
62
63 BarrierSetC1::store_at_resolved(access, value);
64
65 if (access.is_oop()) {
66 bool precise = is_array || on_anonymous;
67 LIR_Opr post_addr = precise ? access.resolved_addr() : access.base().opr();
68 post_barrier(access, post_addr, value);
69 }
70
71 if (vk != nullptr && vk->has_object_fields()) {
72 // Add post-barriers for oop fields
73 for (int i = 0; i < vk->nof_nonstatic_fields(); i++) {
74 ciField* field = vk->nonstatic_field_at(i);
75 if (!field->type()->is_primitive_type()) {
76 int inner_off = field->offset_in_bytes() - vk->payload_offset();
77 int off = access.offset().opr().as_jint() + inner_off;
78 LIRAccess inner_access(access.gen(), decorators, access.base(), LIR_OprFact::intConst(off), field->type()->basic_type(), access.patch_emit_info(), access.access_emit_info());
79
80 // Shift long value to extract the narrow oop field value and zero-extend
81 LIR_Opr field_val = access.gen()->new_register(T_LONG);
82 access.gen()->lir()->unsigned_shift_right(value,
83 LIR_OprFact::intConst(inner_off << LogBitsPerByte),
84 field_val, LIR_Opr::illegalOpr());
85 LIR_Opr mask = access.gen()->load_immediate((julong) max_juint, T_LONG);
86 access.gen()->lir()->logical_and(field_val, mask, field_val);
87 LIR_Opr oop_val = access.gen()->new_register(T_OBJECT);
88 access.gen()->lir()->move(field_val, oop_val);
89
90 assert(!is_array && !on_anonymous, "not suppported");
91 post_barrier(inner_access, access.base().opr(), oop_val);
92 }
93 }
94 }
95 }
96
97 LIR_Opr CardTableBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
98 if (access.is_oop()) {
99 pre_barrier(access, access.resolved_addr(),
100 LIR_OprFact::illegalOpr /* pre_val */, nullptr);
101 }
102
103 LIR_Opr result = BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
104
105 if (access.is_oop()) {
106 post_barrier(access, access.resolved_addr(), new_value.result());
107 }
108
109 return result;
110 }
111
112 LIR_Opr CardTableBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
113 if (access.is_oop()) {
114 pre_barrier(access, access.resolved_addr(),
115 LIR_OprFact::illegalOpr /* pre_val */, nullptr);
116 }
117
118 LIR_Opr result = BarrierSetC1::atomic_xchg_at_resolved(access, value);
119
120 if (access.is_oop()) {
121 post_barrier(access, access.resolved_addr(), value.result());
122 }
123
124 return result;
125 }
126
127 // This overrides the default to resolve the address into a register,
128 // assuming it will be used by a write barrier anyway.
129 LIR_Opr CardTableBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
130 DecoratorSet decorators = access.decorators();
131 bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
132 bool is_write = (decorators & ACCESS_WRITE) != 0;
133 bool is_array = (decorators & IS_ARRAY) != 0;
134 bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
135 bool precise = is_array || on_anonymous;
136 resolve_in_register |= !needs_patching && is_write && access.is_oop() && precise;
137 return BarrierSetC1::resolve_address(access, resolve_in_register);
138 }
139
// Card-marking post-barrier: compute the card covering 'addr' and mark it
// dirty so the next GC card scan revisits the stored-to region. 'addr' may be
// a LIR_Address (field/array element) or a register; 'new_val' is the stored
// oop (not inspected by a plain card mark).
void CardTableBarrierSetC1::post_barrier(LIRAccess& access, LIR_Opr addr, LIR_Opr new_val) {
  DecoratorSet decorators = access.decorators();
  LIRGenerator* gen = access.gen();
  bool in_heap = (decorators & IN_HEAP) != 0;
  // Only heap stores are covered by the card table.
  if (!in_heap) {
    return;
  }

  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
  LIR_Const* card_table_base = new LIR_Const(ctbs->card_table_base_const());

  // Flatten an address operand into a plain pointer register first.
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    // ptr cannot be an object because we use this barrier for array card marks
    // and addr can point in the middle of an array.
    LIR_Opr ptr = gen->new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

#ifdef CARDTABLEBARRIERSET_POST_BARRIER_HELPER
  // Platform supplies its own card-mark code sequence.
  gen->CardTableBarrierSet_post_barrier_helper(addr, card_table_base);
#else
  // card index = address >> card_shift
  LIR_Opr tmp = gen->new_pointer_register();
  if (two_operand_lir_form) {
    // Two-operand LIR cannot shift into a distinct destination register;
    // copy the address into tmp via lea first, then shift in place.
    LIR_Opr addr_opr = LIR_OprFact::address(new LIR_Address(addr, addr->type()));
    __ leal(addr_opr, tmp);
    __ unsigned_shift_right(tmp, CardTable::card_shift(), tmp);
  } else {
    __ unsigned_shift_right(addr, CardTable::card_shift(), tmp);
  }

  // card address = card_table_base + card index
  LIR_Address* card_addr;
  if (gen->can_inline_as_constant(card_table_base)) {
    card_addr = new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE);
  } else {
    card_addr = new LIR_Address(tmp, gen->load_constant(card_table_base), T_BYTE);
  }

  LIR_Opr dirty = LIR_OprFact::intConst(CardTable::dirty_card_val());
  if (UseCondCardMark) {
    // Check-before-store: skip the write if the card is already dirty,
    // avoiding needless cache-line traffic on hot cards.
    LIR_Opr cur_value = gen->new_register(T_INT);
    __ move(card_addr, cur_value);

    LabelObj* L_already_dirty = new LabelObj();
    __ cmp(lir_cond_equal, cur_value, dirty);
    __ branch(lir_cond_equal, L_already_dirty->label());
    __ move(dirty, card_addr);
    __ branch_destination(L_already_dirty->label());
  } else {
    // Unconditionally dirty the card.
    __ move(dirty, card_addr);
  }
#endif
}