1 /*
  2  * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "ci/ciInlineKlass.hpp"
 26 #include "code/aotCodeCache.hpp"
 27 #include "gc/shared/c1/cardTableBarrierSetC1.hpp"
 28 #include "gc/shared/cardTable.hpp"
 29 #include "gc/shared/cardTableBarrierSet.hpp"
 30 #include "gc/shared/gc_globals.hpp"
 31 #include "utilities/macros.hpp"
 32 
 33 #ifdef ASSERT
 34 #define __ gen->lir(__FILE__, __LINE__)->
 35 #else
 36 #define __ gen->lir()->
 37 #endif
 38 
// Emits the store itself plus any GC barriers required for oop stores.
// For flat (inlined) value-object stores whose payload contains oop fields,
// a barrier is additionally emitted for each embedded oop field, since the
// single payload store writes all of them at once.
void CardTableBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
  DecoratorSet decorators = access.decorators();
  bool is_array = (decorators & IS_ARRAY) != 0;
  bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;

  // Is this a flat, atomic access that might require gc barriers on oop fields?
  ciInlineKlass* vk = access.vk();
  if (vk != nullptr && vk->has_object_fields()) {
    // Add pre-barriers for oop fields
    for (int i = 0; i < vk->nof_nonstatic_fields(); i++) {
      ciField* field = vk->nonstatic_field_at(i);
      if (!field->type()->is_primitive_type()) {
        // Translate the field's offset within the inline-klass payload into an
        // offset within the enclosing container (base + access offset).
        int off = access.offset().opr().as_jint() + field->offset_in_bytes() - vk->payload_offset();
        LIRAccess inner_access(access.gen(), decorators, access.base(), LIR_OprFact::intConst(off), field->type()->basic_type(), access.patch_emit_info(), access.access_emit_info());
        pre_barrier(inner_access, resolve_address(inner_access, false),
                    LIR_OprFact::illegalOpr /* pre_val */, inner_access.patch_emit_info());
      }
    }
  }

  // Pre-barrier for a plain oop store (no-op for a pure card-table barrier set,
  // but kept for barrier sets layered on top of this one).
  if (access.is_oop()) {
    pre_barrier(access, access.resolved_addr(),
                LIR_OprFact::illegalOpr /* pre_val */, access.patch_emit_info());
  }

  // Emit the actual store.
  BarrierSetC1::store_at_resolved(access, value);

  if (access.is_oop()) {
    // Array and unknown-oop-ref stores must card-mark the exact slot address;
    // field stores may imprecisely mark the card of the base object header.
    bool precise = is_array || on_anonymous;
    LIR_Opr post_addr = precise ? access.resolved_addr() : access.base().opr();
    post_barrier(access, post_addr, value);
  }

  if (vk != nullptr && vk->has_object_fields()) {
    // Add post-barriers for oop fields
    for (int i = 0; i < vk->nof_nonstatic_fields(); i++) {
      ciField* field = vk->nonstatic_field_at(i);
      if (!field->type()->is_primitive_type()) {
        int inner_off = field->offset_in_bytes() - vk->payload_offset();
        int off = access.offset().opr().as_jint() + inner_off;
        LIRAccess inner_access(access.gen(), decorators, access.base(), LIR_OprFact::intConst(off), field->type()->basic_type(), access.patch_emit_info(), access.access_emit_info());

        // Shift long value to extract the narrow oop field value and zero-extend
        // NOTE(review): the 32-bit mask implies the payload stores compressed
        // (narrow) oops — confirm this path is only taken with UseCompressedOops.
        LIR_Opr field_val = access.gen()->new_register(T_LONG);
        access.gen()->lir()->unsigned_shift_right(value,
                                                  LIR_OprFact::intConst(inner_off << LogBitsPerByte),
                                                  field_val, LIR_Opr::illegalOpr());
        LIR_Opr mask = access.gen()->load_immediate((julong) max_juint, T_LONG);
        access.gen()->lir()->logical_and(field_val, mask, field_val);
        LIR_Opr oop_val = access.gen()->new_register(T_OBJECT);
        access.gen()->lir()->move(field_val, oop_val);

        // Only the imprecise (base-object) card mark is supported here.
        assert(!is_array && !on_anonymous, "not suppported");
        post_barrier(inner_access, access.base().opr(), oop_val);
      }
    }
  }
}
 97 
 98 LIR_Opr CardTableBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
 99   if (access.is_oop()) {
100     pre_barrier(access, access.resolved_addr(),
101                 LIR_OprFact::illegalOpr /* pre_val */, nullptr);
102   }
103 
104   LIR_Opr result = BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
105 
106   if (access.is_oop()) {
107     post_barrier(access, access.resolved_addr(), new_value.result());
108   }
109 
110   return result;
111 }
112 
113 LIR_Opr CardTableBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
114   if (access.is_oop()) {
115     pre_barrier(access, access.resolved_addr(),
116                 LIR_OprFact::illegalOpr /* pre_val */, nullptr);
117   }
118 
119   LIR_Opr result = BarrierSetC1::atomic_xchg_at_resolved(access, value);
120 
121   if (access.is_oop()) {
122     post_barrier(access, access.resolved_addr(), value.result());
123   }
124 
125   return result;
126 }
127 
128 // This overrides the default to resolve the address into a register,
129 // assuming it will be used by a write barrier anyway.
130 LIR_Opr CardTableBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
131   DecoratorSet decorators = access.decorators();
132   bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
133   bool is_write = (decorators & ACCESS_WRITE) != 0;
134   bool is_array = (decorators & IS_ARRAY) != 0;
135   bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
136   bool precise = is_array || on_anonymous;
137   resolve_in_register |= !needs_patching && is_write && access.is_oop() && precise;
138   return BarrierSetC1::resolve_address(access, resolve_in_register);
139 }
140 
// Emits the card-table dirty-card write for a completed oop store: the stored-to
// address is shifted by the card size and used to index the card table, whose
// entry is set to the dirty value. Non-heap stores need no card mark.
void CardTableBarrierSetC1::post_barrier(LIRAccess& access, LIR_Opr addr, LIR_Opr new_val) {
  DecoratorSet decorators = access.decorators();
  LIRGenerator* gen = access.gen();
  bool in_heap = (decorators & IN_HEAP) != 0;
  if (!in_heap) {
    // Only heap stores are tracked by the card table.
    return;
  }

  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
  LIR_Const* card_table_base = new LIR_Const(ctbs->card_table_base_const());

  // Flatten a LIR_Address operand into a plain pointer register first.
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    // ptr cannot be an object because we use this barrier for array card marks
    // and addr can point in the middle of an array.
    LIR_Opr ptr = gen->new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      // Simple base-only address: a move suffices.
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

#ifdef CARDTABLEBARRIERSET_POST_BARRIER_HELPER
  // Platform provides its own card-mark sequence.
  assert(!AOTCodeCache::is_on(), "this path is not implemented");
  gen->CardTableBarrierSet_post_barrier_helper(addr, card_table_base);
#else
  // Compute the card index: addr >> card_shift.
  LIR_Opr tmp = gen->new_pointer_register();
  if (two_operand_lir_form) {
    // Two-operand form cannot shift src into a different dst; copy addr into
    // tmp (via lea of a base-only address) and shift in place.
    LIR_Opr addr_opr = LIR_OprFact::address(new LIR_Address(addr, addr->type()));
    __ leal(addr_opr, tmp);
    __ unsigned_shift_right(tmp, CardTable::card_shift(), tmp);
  } else {
    __ unsigned_shift_right(addr, CardTable::card_shift(), tmp);
  }

  // Form the card entry address: card_table_base + (addr >> card_shift).
  LIR_Address* card_addr;
#if INCLUDE_CDS
  if (AOTCodeCache::is_on_for_dump()) {
    // load the card table address from the AOT Runtime Constants area
    // (the real base is unknown at AOT-compile time, so it is loaded
    // indirectly at runtime).
    LIR_Opr byte_map_base_adr = LIR_OprFact::intptrConst(AOTRuntimeConstants::card_table_base_address());
    LIR_Opr byte_map_base_reg = gen->new_pointer_register();
    __ move(byte_map_base_adr, byte_map_base_reg);
    LIR_Address* byte_map_base_indirect = new LIR_Address(byte_map_base_reg, 0, T_LONG);
    __ move(byte_map_base_indirect, byte_map_base_reg);
    card_addr = new LIR_Address(tmp, byte_map_base_reg, T_BYTE);
  } else
#endif
  if (gen->can_inline_as_constant(card_table_base)) {
    // Base fits as an immediate displacement.
    card_addr = new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE);
  } else {
    // Otherwise materialize the base into a register.
    card_addr = new LIR_Address(tmp, gen->load_constant(card_table_base), T_BYTE);
  }

  LIR_Opr dirty = LIR_OprFact::intConst(CardTable::dirty_card_val());
  if (UseCondCardMark) {
    // Check-before-store: skip the write if the card is already dirty,
    // avoiding needless cache-line traffic on repeatedly written cards.
    LIR_Opr cur_value = gen->new_register(T_INT);
    __ move(card_addr, cur_value);

    LabelObj* L_already_dirty = new LabelObj();
    __ cmp(lir_cond_equal, cur_value, dirty);
    __ branch(lir_cond_equal, L_already_dirty->label());
    __ move(dirty, card_addr);
    __ branch_destination(L_already_dirty->label());
  } else {
    // Unconditional dirty-card store.
    __ move(dirty, card_addr);
  }
#endif
}