/*
 * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_IR.hpp"
#include "gc/shared/satbMarkQueue.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"

#ifdef ASSERT
#define __ gen->lir(__FILE__, __LINE__)->
#else
#define __ gen->lir()->
#endif

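// Both C1 barrier stubs delegate their slow-path code emission to the
// platform-specific ShenandoahBarrierSetAssembler.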
void ShenandoahPreBarrierStub::emit_code(LIR_Assembler* ce) {
  ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
  bs->gen_pre_barrier_stub(ce, this);
}

void ShenandoahLoadReferenceBarrierStub::emit_code(LIR_Assembler* ce) {
  ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
  bs->gen_load_reference_barrier_stub(ce, this);
}

ShenandoahBarrierSetC1::ShenandoahBarrierSetC1() :
  _pre_barrier_c1_runtime_code_blob(NULL),
  _load_reference_barrier_strong_rt_code_blob(NULL),
  _load_reference_barrier_strong_native_rt_code_blob(NULL),
  _load_reference_barrier_weak_rt_code_blob(NULL),
  _load_reference_barrier_phantom_rt_code_blob(NULL) {}

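// Emit the SATB pre-write barrier. The fast path checks the thread-local
// satb-mark-queue-active flag; when marking is in progress, control branches
// to a ShenandoahPreBarrierStub which (loading the previous value from
// addr_opr if necessary) enqueues that value for concurrent marking.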
void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val) {
  // First we test whether marking is in progress.
  BasicType flag_type;
  bool patch = (decorators & C1_NEEDS_PATCHING) != 0;
  bool do_load = pre_val == LIR_OprFact::illegalOpr;
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
              "Assumption");
    // Use unsigned type T_BOOLEAN here rather than signed T_BYTE since some platforms, eg. ARM,
    // need to use unsigned instructions to use the large offset to load the satb_mark_queue.
    flag_type = T_BOOLEAN;
  }
  LIR_Opr thrd = gen->getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = gen->new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch)
      pre_val_patch_code = lir_patch_normal;

    pre_val = gen->new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new ShenandoahPreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info ? new CodeEmitInfo(info) : NULL);
  } else {
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");

    slow = new ShenandoahPreBarrierStub(pre_val);
  }

  __ branch(lir_cond_notEqual, slow);
  __ branch_destination(slow->continuation());
}

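// Apply the load-reference barrier (LRB) to a freshly loaded oop. This is a
// no-op unless ShenandoahLoadRefBarrier is enabled.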
LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr, DecoratorSet decorators) {
  if (ShenandoahLoadRefBarrier) {
    return load_reference_barrier_impl(gen, obj, addr, decorators);
  } else {
    return obj;
  }
}

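// LRB fast path: load the thread-local gc-state byte and mask it with
// HAS_FORWARDED (plus WEAK_ROOTS for non-strong accesses). If any of those
// bits are set, branch to a ShenandoahLoadReferenceBarrierStub that resolves
// the object via the runtime; otherwise the loaded oop is returned as-is.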
LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr, DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");

  obj = ensure_in_register(gen, obj, T_OBJECT);
  assert(obj->is_register(), "must be a register at this point");
  addr = ensure_in_register(gen, addr, T_ADDRESS);
  assert(addr->is_register(), "must be a register at this point");
  LIR_Opr result = gen->result_register_for(obj->value_type());
  __ move(obj, result);
  LIR_Opr tmp1 = gen->new_register(T_ADDRESS);
  LIR_Opr tmp2 = gen->new_register(T_ADDRESS);

  LIR_Opr thrd = gen->getThreadPointer();
  LIR_Address* active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(ShenandoahThreadLocalData::gc_state_offset()),
                    T_BYTE);
  // Read and check the gc-state-flag.
  LIR_Opr flag_val = gen->new_register(T_INT);
  __ load(active_flag_addr, flag_val);
  int flags = ShenandoahHeap::HAS_FORWARDED;
  if (!ShenandoahBarrierSet::is_strong_access(decorators)) {
    flags |= ShenandoahHeap::WEAK_ROOTS;
  }
  LIR_Opr mask = LIR_OprFact::intConst(flags);
  LIR_Opr mask_reg = gen->new_register(T_INT);
  __ move(mask, mask_reg);

  if (TwoOperandLIRForm) {
    __ logical_and(flag_val, mask_reg, flag_val);
  } else {
    LIR_Opr masked_flag = gen->new_register(T_INT);
    __ logical_and(flag_val, mask_reg, masked_flag);
    flag_val = masked_flag;
  }
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, addr, result, tmp1, tmp2, decorators);
  __ branch(lir_cond_notEqual, slow);
  __ branch_destination(slow->continuation());

  return result;
}

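// Make sure an operand is in a register: constants are moved into a fresh
// register of the requested type, while other non-register operands
// (typically addresses) are materialized into a pointer register with leal.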
LIR_Opr ShenandoahBarrierSetC1::ensure_in_register(LIRGenerator* gen, LIR_Opr obj, BasicType type) {
  if (!obj->is_register()) {
    LIR_Opr obj_reg;
    if (obj->is_constant()) {
      obj_reg = gen->new_register(type);
      __ move(obj, obj_reg);
    } else {
      obj_reg = gen->new_pointer_register();
      __ leal(obj, obj_reg);
    }
    obj = obj_reg;
  }
  return obj;
}

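// Incremental-update (IU) barrier: record the new value of an oop store by
// reusing the SATB pre-barrier machinery with the value passed as pre_val,
// so no load from memory is performed.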
LIR_Opr ShenandoahBarrierSetC1::iu_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators) {
  if (ShenandoahIUBarrier) {
    obj = ensure_in_register(gen, obj, T_OBJECT);
    pre_barrier(gen, info, decorators, LIR_OprFact::illegalOpr, obj);
  }
  return obj;
}

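// Oop stores get the SATB pre-barrier on the previous value and the IU
// barrier on the new value, then the store itself, followed by the
// card-marking post-barrier (precise for array and unknown-oop-ref stores,
// imprecise otherwise).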
void ShenandoahBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
  if (access.is_oop()) {
    if (ShenandoahSATBBarrier) {
      pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), access.resolved_addr(), LIR_OprFact::illegalOpr /* pre_val */);
    }
    value = iu_barrier(access.gen(), value, access.access_emit_info(), access.decorators());
  }
  BarrierSetC1::store_at_resolved(access, value);

  if (access.is_oop()) {
    DecoratorSet decorators = access.decorators();
    bool is_array = (decorators & IS_ARRAY) != 0;
    bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;

    bool precise = is_array || on_anonymous;
    LIR_Opr post_addr = precise ? access.resolved_addr() : access.base().opr();
    post_barrier(access, post_addr, value);
  }
}

LIR_Opr ShenandoahBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
  // We must resolve in register when patching. This is to avoid
  // having a patch area in the load barrier stub, since the call
  // into the runtime to patch will not have the proper oop map.
  const bool patch_before_barrier = access.is_oop() && (access.decorators() & C1_NEEDS_PATCHING) != 0;
  return BarrierSetC1::resolve_address(access, resolve_in_register || patch_before_barrier);
}

void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
  // 1: non-reference load, no additional barrier is needed
  if (!access.is_oop()) {
    BarrierSetC1::load_at_resolved(access, result);
    return;
  }

  LIRGenerator* gen = access.gen();
  DecoratorSet decorators = access.decorators();
  BasicType type = access.type();

  // 2: load a reference from src location and apply LRB if ShenandoahLoadRefBarrier is set
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    LIR_Opr tmp = gen->new_register(T_OBJECT);
    BarrierSetC1::load_at_resolved(access, tmp);
    tmp = load_reference_barrier(gen, tmp, access.resolved_addr(), decorators);
    __ move(tmp, result);
  } else {
    BarrierSetC1::load_at_resolved(access, result);
  }

  // 3: apply keep-alive barrier for java.lang.ref.Reference if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;

    // Register the value in the referent field with the pre-barrier
    LabelObj *Lcont_anonymous;
    if (is_anonymous) {
      Lcont_anonymous = new LabelObj();
      generate_referent_check(access, Lcont_anonymous);
    }
    pre_barrier(gen, access.access_emit_info(), decorators, LIR_OprFact::illegalOpr /* addr_opr */,
                result /* pre_val */);
    if (is_anonymous) {
      __ branch_destination(Lcont_anonymous->label());
    }
  }
}

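// Code-gen closures used by generate_c1_runtime_stubs() below: each one emits
// the body of a slow-path runtime stub via the platform-specific barrier set
// assembler.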
class C1ShenandoahPreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
    bs->generate_c1_pre_barrier_runtime_stub(sasm);
    return NULL;
  }
};

class C1ShenandoahLoadReferenceBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
private:
  const DecoratorSet _decorators;

public:
  C1ShenandoahLoadReferenceBarrierCodeGenClosure(DecoratorSet decorators) : _decorators(decorators) {}

  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
    bs->generate_c1_load_reference_barrier_runtime_stub(sasm, _decorators);
    return NULL;
  }
};

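// Generate the C1 runtime stubs: the SATB pre-barrier blob is always built,
// plus the strong, strong-native, weak and phantom LRB blobs when
// ShenandoahLoadRefBarrier is enabled.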
void ShenandoahBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) {
  C1ShenandoahPreBarrierCodeGenClosure pre_code_gen_cl;
  _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                                              "shenandoah_pre_barrier_slow",
                                                              false, &pre_code_gen_cl);
  if (ShenandoahLoadRefBarrier) {
    C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_strong_code_gen_cl(ON_STRONG_OOP_REF);
    _load_reference_barrier_strong_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                                                  "shenandoah_load_reference_barrier_strong_slow",
                                                                  false, &lrb_strong_code_gen_cl);

    C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_strong_native_code_gen_cl(ON_STRONG_OOP_REF | IN_NATIVE);
    _load_reference_barrier_strong_native_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                                                          "shenandoah_load_reference_barrier_strong_native_slow",
                                                                          false, &lrb_strong_native_code_gen_cl);

    C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_weak_code_gen_cl(ON_WEAK_OOP_REF);
    _load_reference_barrier_weak_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                                                          "shenandoah_load_reference_barrier_weak_slow",
                                                                          false, &lrb_weak_code_gen_cl);

    C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_phantom_code_gen_cl(ON_PHANTOM_OOP_REF | IN_NATIVE);
    _load_reference_barrier_phantom_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                                                           "shenandoah_load_reference_barrier_phantom_slow",
                                                                           false, &lrb_phantom_code_gen_cl);
  }
}

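// Card-marking post-barrier, emitted only in generational mode for in-heap
// oop stores: compute the card address as (addr >> card_shift) + byte_map_base
// and store the dirty value, optionally guarded by a load-and-compare of the
// current card value when UseCondCardMark is enabled.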
void ShenandoahBarrierSetC1::post_barrier(LIRAccess& access, LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  if (!ShenandoahHeap::heap()->mode()->is_generational()) {
    return;
  }

  DecoratorSet decorators = access.decorators();
  LIRGenerator* gen = access.gen();
  bool in_heap = (decorators & IN_HEAP) != 0;
  if (!in_heap) {
    return;
  }

  BarrierSet* bs = BarrierSet::barrier_set();
  ShenandoahBarrierSet* ctbs = barrier_set_cast<ShenandoahBarrierSet>(bs);
  CardTable* ct = ctbs->card_table();
  LIR_Const* card_table_base = new LIR_Const(ct->byte_map_base());
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    // ptr cannot be an object because we use this barrier for array card marks
    // and addr can point in the middle of an array.
    LIR_Opr ptr = gen->new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  LIR_Opr tmp = gen->new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTable::card_shift, tmp);
  } else {
    __ unsigned_shift_right(addr, CardTable::card_shift, tmp);
  }

  LIR_Address* card_addr;
  if (gen->can_inline_as_constant(card_table_base)) {
    card_addr = new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE);
  } else {
    card_addr = new LIR_Address(tmp, gen->load_constant(card_table_base), T_BYTE);
  }

  LIR_Opr dirty = LIR_OprFact::intConst(CardTable::dirty_card_val());
  if (UseCondCardMark) {
    LIR_Opr cur_value = gen->new_register(T_INT);
    __ move(card_addr, cur_value);

    LabelObj* L_already_dirty = new LabelObj();
    __ cmp(lir_cond_equal, cur_value, dirty);
    __ branch(lir_cond_equal, L_already_dirty->label());
    __ move(dirty, card_addr);
    __ branch_destination(L_already_dirty->label());
  } else {
    __ move(dirty, card_addr);
  }
}