/*
 * Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_IR.hpp"
#include "gc/shared/satbMarkQueue.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"

#ifdef ASSERT
#define __ gen->lir(__FILE__, __LINE__)->
#else
#define __ gen->lir()->
#endif

void ShenandoahPreBarrierStub::emit_code(LIR_Assembler* ce) {
  ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
  bs->gen_pre_barrier_stub(ce, this);
}

void ShenandoahLoadReferenceBarrierStub::emit_code(LIR_Assembler* ce) {
  ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
  bs->gen_load_reference_barrier_stub(ce, this);
}

ShenandoahBarrierSetC1::ShenandoahBarrierSetC1() :
  _pre_barrier_c1_runtime_code_blob(nullptr),
  _load_reference_barrier_strong_rt_code_blob(nullptr),
  _load_reference_barrier_strong_native_rt_code_blob(nullptr),
  _load_reference_barrier_weak_rt_code_blob(nullptr),
  _load_reference_barrier_phantom_rt_code_blob(nullptr) {}

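// Emits the SATB (snapshot-at-the-beginning) pre-barrier fast path: load the
// thread-local "SATB queue active" flag and, if marking is in progress, branch
// to a slow-path stub that records the previous value of the reference field.
// If pre_val is illegalOpr, the stub loads the previous value from addr_opr;
// otherwise pre_val already holds the value to be enqueued.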
void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val) {
  // First we test whether marking is in progress.
  BasicType flag_type;
  bool patch = (decorators & C1_NEEDS_PATCHING) != 0;
  bool do_load = pre_val == LIR_OprFact::illegalOpr;
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
              "Assumption");
    // Use unsigned type T_BOOLEAN here rather than signed T_BYTE since some platforms, e.g. ARM,
    // need to use unsigned instructions to use the large offset to load the satb_mark_queue.
    flag_type = T_BOOLEAN;
  }
  LIR_Opr thrd = gen->getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = gen->new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch)
      pre_val_patch_code = lir_patch_normal;

    pre_val = gen->new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new ShenandoahPreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info ? new CodeEmitInfo(info) : nullptr);
  } else {
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");

    slow = new ShenandoahPreBarrierStub(pre_val);
  }

  __ branch(lir_cond_notEqual, slow);
  __ branch_destination(slow->continuation());
}

LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr, DecoratorSet decorators) {
  if (ShenandoahLoadRefBarrier) {
    return load_reference_barrier_impl(gen, obj, addr, decorators);
  } else {
    return obj;
  }
}

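// Emits the load-reference-barrier (LRB) fast path: copy the loaded oop into the
// result register, then check the thread-local gc-state for HAS_FORWARDED (plus
// WEAK_ROOTS for non-strong accesses). If any of those bits are set, branch to a
// slow-path stub that resolves the object; otherwise fall through with the
// original value.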
LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr, DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");

  obj = ensure_in_register(gen, obj, T_OBJECT);
  assert(obj->is_register(), "must be a register at this point");
  addr = ensure_in_register(gen, addr, T_ADDRESS);
  assert(addr->is_register(), "must be a register at this point");
  LIR_Opr result = gen->result_register_for(obj->value_type());
  __ move(obj, result);
  LIR_Opr tmp1 = gen->new_register(T_ADDRESS);
  LIR_Opr tmp2 = gen->new_register(T_ADDRESS);

  LIR_Opr thrd = gen->getThreadPointer();
  LIR_Address* active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(ShenandoahThreadLocalData::gc_state_offset()),
                    T_BYTE);
  // Read and check the gc-state-flag.
  LIR_Opr flag_val = gen->new_register(T_INT);
  __ load(active_flag_addr, flag_val);
  int flags = ShenandoahHeap::HAS_FORWARDED;
  if (!ShenandoahBarrierSet::is_strong_access(decorators)) {
    flags |= ShenandoahHeap::WEAK_ROOTS;
  }
  LIR_Opr mask = LIR_OprFact::intConst(flags);
  LIR_Opr mask_reg = gen->new_register(T_INT);
  __ move(mask, mask_reg);

  if (two_operand_lir_form) {
    __ logical_and(flag_val, mask_reg, flag_val);
  } else {
    LIR_Opr masked_flag = gen->new_register(T_INT);
    __ logical_and(flag_val, mask_reg, masked_flag);
    flag_val = masked_flag;
  }
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, addr, result, tmp1, tmp2, decorators);
  __ branch(lir_cond_notEqual, slow);
  __ branch_destination(slow->continuation());

  return result;
}

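// Materializes obj into a register if it is not one already: constants are moved
// into a new register of the given type, addresses are computed with leal into a
// pointer register.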
LIR_Opr ShenandoahBarrierSetC1::ensure_in_register(LIRGenerator* gen, LIR_Opr obj, BasicType type) {
  if (!obj->is_register()) {
    LIR_Opr obj_reg;
    if (obj->is_constant()) {
      obj_reg = gen->new_register(type);
      __ move(obj, obj_reg);
    } else {
      obj_reg = gen->new_pointer_register();
      __ leal(obj, obj_reg);
    }
    obj = obj_reg;
  }
  return obj;
}

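// Incremental-update barrier: with ShenandoahIUBarrier enabled, enqueue the new
// value being stored (rather than the previous value) through the same SATB
// queue machinery used by the pre-barrier.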
LIR_Opr ShenandoahBarrierSetC1::iu_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators) {
  if (ShenandoahIUBarrier) {
    obj = ensure_in_register(gen, obj, T_OBJECT);
    pre_barrier(gen, info, decorators, LIR_OprFact::illegalOpr, obj);
  }
  return obj;
}

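// Reference stores: emit the SATB pre-barrier (previous value) and/or the IU
// barrier (new value) before the store, then, if ShenandoahCardBarrier is
// enabled, mark the corresponding card afterwards. Array and unknown-reference
// stores mark the card of the precise field address; ordinary field stores mark
// the card of the object base.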
void ShenandoahBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
  if (access.is_oop()) {
    if (ShenandoahSATBBarrier) {
      pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), access.resolved_addr(), LIR_OprFact::illegalOpr /* pre_val */);
    }
    value = iu_barrier(access.gen(), value, access.access_emit_info(), access.decorators());
  }
  BarrierSetC1::store_at_resolved(access, value);

  if (ShenandoahCardBarrier && access.is_oop()) {
    DecoratorSet decorators = access.decorators();
    bool is_array = (decorators & IS_ARRAY) != 0;
    bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;

    bool precise = is_array || on_anonymous;
    LIR_Opr post_addr = precise ? access.resolved_addr() : access.base().opr();
    post_barrier(access, post_addr, value);
  }
}

LIR_Opr ShenandoahBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
  // We must resolve in register when patching. This is to avoid
  // having a patch area in the load barrier stub, since the call
  // into the runtime to patch will not have the proper oop map.
  const bool patch_before_barrier = access.is_oop() && (access.decorators() & C1_NEEDS_PATCHING) != 0;
  return BarrierSetC1::resolve_address(access, resolve_in_register || patch_before_barrier);
}

void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
  // 1: non-reference load, no additional barrier is needed
  if (!access.is_oop()) {
    BarrierSetC1::load_at_resolved(access, result);
    return;
  }

  LIRGenerator* gen = access.gen();
  DecoratorSet decorators = access.decorators();
  BasicType type = access.type();

  // 2: load a reference from src location and apply LRB if ShenandoahLoadRefBarrier is set
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    LIR_Opr tmp = gen->new_register(T_OBJECT);
    BarrierSetC1::load_at_resolved(access, tmp);
    tmp = load_reference_barrier(gen, tmp, access.resolved_addr(), decorators);
    __ move(tmp, result);
  } else {
    BarrierSetC1::load_at_resolved(access, result);
  }

  // 3: apply keep-alive barrier for java.lang.ref.Reference if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;

    // Register the value in the referent field with the pre-barrier
    LabelObj *Lcont_anonymous;
    if (is_anonymous) {
      Lcont_anonymous = new LabelObj();
      generate_referent_check(access, Lcont_anonymous);
    }
    pre_barrier(gen, access.access_emit_info(), decorators, LIR_OprFact::illegalOpr /* addr_opr */,
                result /* pre_val */);
    if (is_anonymous) {
      __ branch_destination(Lcont_anonymous->label());
    }
  }
}

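// Closures handed to Runtime1::generate_blob below; each one emits the body of a
// slow-path runtime stub through the platform ShenandoahBarrierSetAssembler.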
class C1ShenandoahPreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
    bs->generate_c1_pre_barrier_runtime_stub(sasm);
    return nullptr;
  }
};

class C1ShenandoahLoadReferenceBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
private:
  const DecoratorSet _decorators;

public:
  C1ShenandoahLoadReferenceBarrierCodeGenClosure(DecoratorSet decorators) : _decorators(decorators) {}

  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
    bs->generate_c1_load_reference_barrier_runtime_stub(sasm, _decorators);
    return nullptr;
  }
};

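// Generates the C1 slow-path runtime stubs: one for the SATB pre-barrier and,
// when ShenandoahLoadRefBarrier is enabled, one LRB stub per access strength
// (strong, strong-native, weak, phantom).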
void ShenandoahBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) {
  C1ShenandoahPreBarrierCodeGenClosure pre_code_gen_cl;
  _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                                              "shenandoah_pre_barrier_slow",
                                                              false, &pre_code_gen_cl);
  if (ShenandoahLoadRefBarrier) {
    C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_strong_code_gen_cl(ON_STRONG_OOP_REF);
    _load_reference_barrier_strong_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                                                  "shenandoah_load_reference_barrier_strong_slow",
                                                                  false, &lrb_strong_code_gen_cl);

    C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_strong_native_code_gen_cl(ON_STRONG_OOP_REF | IN_NATIVE);
    _load_reference_barrier_strong_native_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                                                          "shenandoah_load_reference_barrier_strong_native_slow",
                                                                          false, &lrb_strong_native_code_gen_cl);

    C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_weak_code_gen_cl(ON_WEAK_OOP_REF);
    _load_reference_barrier_weak_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                                                          "shenandoah_load_reference_barrier_weak_slow",
                                                                          false, &lrb_weak_code_gen_cl);

    C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_phantom_code_gen_cl(ON_PHANTOM_OOP_REF | IN_NATIVE);
    _load_reference_barrier_phantom_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                                                           "shenandoah_load_reference_barrier_phantom_slow",
                                                                           false, &lrb_phantom_code_gen_cl);
  }
}

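// Card-table post-barrier (only emitted when ShenandoahCardBarrier is enabled):
// compute the card index by shifting the store address right by the card shift,
// then mark the corresponding card table entry dirty. With UseCondCardMark, the
// card is read first and only written if it is not already dirty.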
void ShenandoahBarrierSetC1::post_barrier(LIRAccess& access, LIR_Opr addr, LIR_Opr new_val) {
  assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?");

  DecoratorSet decorators = access.decorators();
  LIRGenerator* gen = access.gen();
  bool in_heap = (decorators & IN_HEAP) != 0;
  if (!in_heap) {
    return;
  }

  BarrierSet* bs = BarrierSet::barrier_set();
  ShenandoahBarrierSet* ctbs = barrier_set_cast<ShenandoahBarrierSet>(bs);
  CardTable* ct = ctbs->card_table();
  LIR_Const* card_table_base = new LIR_Const(ct->byte_map_base());
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    // ptr cannot be an object because we use this barrier for array card marks
    // and addr can point in the middle of an array.
    LIR_Opr ptr = gen->new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  LIR_Opr tmp = gen->new_pointer_register();
  if (two_operand_lir_form) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTable::card_shift(), tmp);
  } else {
    __ unsigned_shift_right(addr, CardTable::card_shift(), tmp);
  }

  LIR_Address* card_addr;
  if (gen->can_inline_as_constant(card_table_base)) {
    card_addr = new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE);
  } else {
    card_addr = new LIR_Address(tmp, gen->load_constant(card_table_base), T_BYTE);
  }

  LIR_Opr dirty = LIR_OprFact::intConst(CardTable::dirty_card_val());
  if (UseCondCardMark) {
    LIR_Opr cur_value = gen->new_register(T_INT);
    __ move(card_addr, cur_value);

    LabelObj* L_already_dirty = new LabelObj();
    __ cmp(lir_cond_equal, cur_value, dirty);
    __ branch(lir_cond_equal, L_already_dirty->label());
    __ move(dirty, card_addr);
    __ branch_destination(L_already_dirty->label());
  } else {
    __ move(dirty, card_addr);
  }
}