1 /*
   2  * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "c1/c1_IR.hpp"
  26 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
  27 #include "gc/shenandoah/shenandoahHeap.hpp"
  28 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  29 #include "gc/shenandoah/shenandoahSATBMarkQueue.hpp"
  30 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  31 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
  32 
  33 #ifdef ASSERT
  34 #define __ gen->lir(__FILE__, __LINE__)->
  35 #else
  36 #define __ gen->lir()->
  37 #endif
  38 
  39 void ShenandoahPreBarrierStub::emit_code(LIR_Assembler* ce) {
  40   ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
  41   bs->gen_pre_barrier_stub(ce, this);
  42 }
  43 
  44 void ShenandoahLoadReferenceBarrierStub::emit_code(LIR_Assembler* ce) {
  45   ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
  46   bs->gen_load_reference_barrier_stub(ce, this);
  47 }
  48 
// Runtime-stub blobs start out NULL; they are created later by
// generate_c1_runtime_stubs() during C1 initialization.
ShenandoahBarrierSetC1::ShenandoahBarrierSetC1() :
  _pre_barrier_c1_runtime_code_blob(NULL),
  _load_reference_barrier_rt_code_blob(NULL) {}
  52 
// Emits the SATB (snapshot-at-the-beginning) pre-barrier: test the
// thread-local "SATB queue active" flag and, if it is non-zero, branch to a
// ShenandoahPreBarrierStub slow path that records the previous field value.
//
// Two mutually exclusive modes, selected by the operands:
//  - load mode (pre_val == illegalOpr, addr_opr valid): the stub loads the
//    previous value from addr_opr itself (possibly with patching);
//  - value mode (addr_opr == illegalOpr, pre_val valid): the caller already
//    holds the previous value in pre_val.
void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val) {
  // First we test whether marking is in progress.
  BasicType flag_type;
  bool patch = (decorators & C1_NEEDS_PATCHING) != 0;
  bool do_load = pre_val == LIR_OprFact::illegalOpr;
  if (in_bytes(ShenandoahSATBMarkQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(ShenandoahSATBMarkQueue::byte_width_of_active()) == 1,
              "Assumption");
    // Use unsigned type T_BOOLEAN here rather than signed T_BYTE since some platforms, eg. ARM,
    // need to use unsigned instructions to use the large offset to load the satb_mark_queue.
    flag_type = T_BOOLEAN;
  }
  LIR_Opr thrd = gen->getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = gen->new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    // Load mode: the slow path loads the previous value into a fresh
    // pre_val register; wrap a plain register base into an address operand.
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch)
      pre_val_patch_code = lir_patch_normal;

    pre_val = gen->new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new ShenandoahPreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info ? new CodeEmitInfo(info) : NULL);
  } else {
    // Value mode: the caller supplies the previous value directly.
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");

    slow = new ShenandoahPreBarrierStub(pre_val);
  }

  // Take the slow path while marking is active; fall through otherwise.
  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}
 106 
 107 LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr) {
 108   if (ShenandoahLoadRefBarrier) {
 109     return load_reference_barrier_impl(gen, obj, addr);
 110   } else {
 111     return obj;
 112   }
 113 }
 114 
// Emits the load-reference barrier fast path: copy the loaded oop into the
// result register, read the thread-local gc-state byte, mask it with the
// states that require fixup (HAS_FORWARDED | EVACUATION | TRAVERSAL), and
// branch to a ShenandoahLoadReferenceBarrierStub slow path when any of those
// bits are set. Returns the (possibly slow-path-updated) result register.
LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");

  obj = ensure_in_register(gen, obj);
  assert(obj->is_register(), "must be a register at this point");
  addr = ensure_in_register(gen, addr);
  assert(addr->is_register(), "must be a register at this point");
  LIR_Opr result = gen->result_register_for(obj->value_type());
  __ move(obj, result);
  LIR_Opr tmp1 = gen->new_register(T_OBJECT);
  LIR_Opr tmp2 = gen->new_register(T_OBJECT);

  LIR_Opr thrd = gen->getThreadPointer();
  LIR_Address* active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(ShenandoahThreadLocalData::gc_state_offset()),
                    T_BYTE);
  // Read and check the gc-state-flag.
  LIR_Opr flag_val = gen->new_register(T_INT);
  __ load(active_flag_addr, flag_val);
  LIR_Opr mask = LIR_OprFact::intConst(ShenandoahHeap::HAS_FORWARDED |
                                       ShenandoahHeap::EVACUATION |
                                       ShenandoahHeap::TRAVERSAL);
  LIR_Opr mask_reg = gen->new_register(T_INT);
  __ move(mask, mask_reg);

  // On two-operand architectures the and-result must reuse an input register.
  if (TwoOperandLIRForm) {
    __ logical_and(flag_val, mask_reg, flag_val);
  } else {
    LIR_Opr masked_flag = gen->new_register(T_INT);
    __ logical_and(flag_val, mask_reg, masked_flag);
    flag_val = masked_flag;
  }
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  // Slow path resolves/fixes up the oop in 'result' when the GC is active.
  CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, addr, result, tmp1, tmp2);
  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());

  return result;
}
 156 
 157 LIR_Opr ShenandoahBarrierSetC1::ensure_in_register(LIRGenerator* gen, LIR_Opr obj) {
 158   if (!obj->is_register()) {
 159     LIR_Opr obj_reg;
 160     if (obj->is_constant()) {
 161       obj_reg = gen->new_register(T_OBJECT);
 162       __ move(obj, obj_reg);
 163     } else {
 164 #ifdef AARCH64
 165       // AArch64 expects double-size register.
 166       obj_reg = gen->new_pointer_register();
 167 #else
 168       // x86 expects single-size register.
 169       obj_reg = gen->new_register(T_OBJECT);
 170 #endif
 171       __ leal(obj, obj_reg);
 172     }
 173     obj = obj_reg;
 174   }
 175   return obj;
 176 }
 177 
 178 LIR_Opr ShenandoahBarrierSetC1::storeval_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators) {
 179   if (ShenandoahStoreValEnqueueBarrier) {
 180     obj = ensure_in_register(gen, obj);
 181     pre_barrier(gen, info, decorators, LIR_OprFact::illegalOpr, obj);
 182   }
 183   return obj;
 184 }
 185 
 186 void ShenandoahBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
 187   if (access.is_oop()) {
 188     if (ShenandoahSATBBarrier) {
 189       pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), access.resolved_addr(), LIR_OprFact::illegalOpr /* pre_val */);
 190     }
 191     value = storeval_barrier(access.gen(), value, access.access_emit_info(), access.decorators());
 192   }
 193   BarrierSetC1::store_at_resolved(access, value);
 194 }
 195 
 196 // TODO: This is here temporarily until backport of JDK-8217717 arrives.
 197 #ifndef PATCHED_ADDR
 198 #define PATCHED_ADDR  (max_jint)
 199 #endif
// Compute the LIR address operand for an access. Mirrors the superclass
// address-resolution logic (see the PATCHED_ADDR shim above): array accesses
// use emit_array_address, field accesses that need patching get a PATCHED_ADDR
// placeholder offset, everything else goes through generate_address. When
// resolve_in_register is set, the address is additionally materialized into a
// pointer register via leal (with patching applied there, after which the
// C1_NEEDS_PATCHING decorator is cleared).
static LIR_Opr resolve_address_super(LIRAccess& access, bool resolve_in_register) {
  DecoratorSet decorators = access.decorators();
  bool is_array = (decorators & IS_ARRAY) != 0;
  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;

  LIRItem& base = access.base().item();
  LIR_Opr offset = access.offset().opr();
  LIRGenerator *gen = access.gen();

  LIR_Opr addr_opr;
  if (is_array) {
    addr_opr = LIR_OprFact::address(gen->emit_array_address(base.result(), offset, access.type()));
  } else if (needs_patching) {
    // we need to patch the offset in the instruction so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    addr_opr = LIR_OprFact::address(new LIR_Address(base.result(), PATCHED_ADDR, access.type()));
  } else {
    addr_opr = LIR_OprFact::address(gen->generate_address(base.result(), offset, 0, 0, access.type()));
  }

  if (resolve_in_register) {
    LIR_Opr resolved_addr = gen->new_pointer_register();
    if (needs_patching) {
      // leal carries the patch; subsequent uses of the address need none.
      __ leal(addr_opr, resolved_addr, lir_patch_normal, access.patch_emit_info());
      access.clear_decorators(C1_NEEDS_PATCHING);
    } else {
      __ leal(addr_opr, resolved_addr);
    }
    return LIR_OprFact::address(new LIR_Address(resolved_addr, access.type()));
  } else {
    return addr_opr;
  }
}
 235 
 236 LIR_Opr ShenandoahBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
 237   // We must resolve in register when patching. This is to avoid
 238   // having a patch area in the load barrier stub, since the call
 239   // into the runtime to patch will not have the proper oop map.
 240   const bool patch_before_barrier = access.is_oop() && (access.decorators() & C1_NEEDS_PATCHING) != 0;
 241   return resolve_address_super(access, resolve_in_register || patch_before_barrier);
 242 }
 243 
 244 void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
 245   if (!access.is_oop()) {
 246     BarrierSetC1::load_at_resolved(access, result);
 247     return;
 248   }
 249 
 250   LIRGenerator *gen = access.gen();
 251 
 252   if (ShenandoahLoadRefBarrier) {
 253     LIR_Opr tmp = gen->new_register(T_OBJECT);
 254     BarrierSetC1::load_at_resolved(access, tmp);
 255     tmp = load_reference_barrier(access.gen(), tmp, access.resolved_addr());
 256     __ move(tmp, result);
 257   } else {
 258     BarrierSetC1::load_at_resolved(access, result);
 259   }
 260 
 261   if (ShenandoahKeepAliveBarrier) {
 262     DecoratorSet decorators = access.decorators();
 263     bool is_weak = (decorators & ON_WEAK_OOP_REF) != 0;
 264     bool is_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
 265     bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
 266     if (is_weak || is_phantom || is_anonymous) {
 267       // Register the value in the referent field with the pre-barrier
 268       LabelObj *Lcont_anonymous;
 269       if (is_anonymous) {
 270         Lcont_anonymous = new LabelObj();
 271         generate_referent_check(access, Lcont_anonymous);
 272       }
 273       pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr /* addr_opr */,
 274                   result /* pre_val */);
 275       if (is_anonymous) {
 276         __ branch_destination(Lcont_anonymous->label());
 277       }
 278     }
 279   }
 280 }
 281 
 282 class C1ShenandoahPreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
 283   virtual OopMapSet* generate_code(StubAssembler* sasm) {
 284     ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
 285     bs->generate_c1_pre_barrier_runtime_stub(sasm);
 286     return NULL;
 287   }
 288 };
 289 
 290 class C1ShenandoahLoadReferenceBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
 291   virtual OopMapSet* generate_code(StubAssembler* sasm) {
 292     ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
 293     bs->generate_c1_load_reference_barrier_runtime_stub(sasm);
 294     return NULL;
 295   }
 296 };
 297 
// Generate the shared C1 runtime stubs used by the barrier slow paths.
// The pre-barrier blob is generated unconditionally; the load-reference
// barrier blob only when ShenandoahLoadRefBarrier is enabled (otherwise
// _load_reference_barrier_rt_code_blob stays NULL from the constructor).
void ShenandoahBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) {
  C1ShenandoahPreBarrierCodeGenClosure pre_code_gen_cl;
  _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                                              "shenandoah_pre_barrier_slow",
                                                              false, &pre_code_gen_cl);
  if (ShenandoahLoadRefBarrier) {
    C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_code_gen_cl;
    _load_reference_barrier_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                                                  "shenandoah_load_reference_barrier_slow",
                                                                  false, &lrb_code_gen_cl);
  }
}