src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp

@@ -103,23 +103,25 @@
 
   __ branch(lir_cond_notEqual, T_INT, slow);
   __ branch_destination(slow->continuation());
 }
 
-LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier(LIRGenerator* gen, LIR_Opr obj) {
+LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr) {
   if (ShenandoahLoadRefBarrier) {
-    return load_reference_barrier_impl(gen, obj);
+    return load_reference_barrier_impl(gen, obj, addr);
   } else {
     return obj;
   }
 }
 
-LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj) {
+LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr) {
   assert(ShenandoahLoadRefBarrier, "Should be enabled");
 
   obj = ensure_in_register(gen, obj);
   assert(obj->is_register(), "must be a register at this point");
+  addr = ensure_in_register(gen, addr);
+  assert(addr->is_register(), "must be a register at this point");
   LIR_Opr result = gen->result_register_for(obj->value_type());
   __ move(obj, result);
   LIR_Opr tmp1 = gen->new_register(T_OBJECT);
   LIR_Opr tmp2 = gen->new_register(T_OBJECT);
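
The barrier entry point and its implementation now carry the resolved load
address as a second operand, kept in a register so the slow-path stub built
below can name the slot the oop was loaded from. A summary sketch of the
call chain after this change (all names taken from this file):

  // Call chain after this change:
  //   load_at_resolved(access, result)
  //     -> load_reference_barrier(gen, tmp, access.resolved_addr())
  //        -> load_reference_barrier_impl(gen, obj, addr)
  //           -> new ShenandoahLoadReferenceBarrierStub(obj, addr, result, tmp1, tmp2)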
 

@@ -144,23 +146,31 @@
     __ logical_and(flag_val, mask_reg, masked_flag);
     flag_val = masked_flag;
   }
   __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
 
-  CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, result, tmp1, tmp2);
+  CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, addr, result, tmp1, tmp2);
   __ branch(lir_cond_notEqual, T_INT, slow);
   __ branch_destination(slow->continuation());
 
   return result;
 }
 
 LIR_Opr ShenandoahBarrierSetC1::ensure_in_register(LIRGenerator* gen, LIR_Opr obj) {
   if (!obj->is_register()) {
-    LIR_Opr obj_reg = gen->new_register(T_OBJECT);
+    LIR_Opr obj_reg;
     if (obj->is_constant()) {
+      obj_reg = gen->new_register(T_OBJECT);
       __ move(obj, obj_reg);
     } else {
+#ifdef AARCH64
+      // AArch64 expects double-size register.
+      obj_reg = gen->new_pointer_register();
+#else
+      // x86 expects single-size register.
+      obj_reg = gen->new_register(T_OBJECT);
+#endif
       __ leal(obj, obj_reg);
     }
     obj = obj_reg;
   }
   return obj;
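
The platform split above matches how C1 sizes pointer registers: on LP64
targets new_pointer_register() hands back a double-size register, which is
what the AArch64 backend expects as the destination of leal(), while the
x86 LIR accepts a single-size T_OBJECT register there. A sketch of
new_pointer_register() as it appears in mainline c1_LIRGenerator.hpp
(quoted from memory, so treat as approximate):

  LIR_Opr new_pointer_register() {
  #ifdef _LP64
    return new_register(T_LONG);   // double-size register on 64-bit
  #else
    return new_register(T_INT);
  #endif
  }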

@@ -182,10 +192,18 @@
     value = storeval_barrier(access.gen(), value, access.access_emit_info(), access.decorators());
   }
   BarrierSetC1::store_at_resolved(access, value);
 }
 
+LIR_Opr ShenandoahBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
+  // We must resolve in register when patching. This is to avoid
+  // having a patch area in the load barrier stub, since the call
+  // into the runtime to patch will not have the proper oop map.
+  const bool patch_before_barrier = access.is_oop() && (access.decorators() & C1_NEEDS_PATCHING) != 0;
+  return BarrierSetC1::resolve_address(access, resolve_in_register || patch_before_barrier);
+}
+
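Forcing resolve_in_register whenever the access needs patching makes the
base class emit the patching leal at the access site, where a proper
CodeEmitInfo (and hence oop map) is available, and hands the barrier a
plain register-based address with no patch site in it. A condensed sketch
of the mainline BarrierSetC1::resolve_address() branch this relies on
(reconstructed from memory; treat as approximate):

  if (resolve_in_register) {
    LIR_Opr resolved_addr = gen->new_pointer_register();
    if (needs_patching) {
      // The patch is applied here, with the access's emit info; the
      // decorator is cleared so nothing downstream tries to patch again.
      __ leal(addr_opr, resolved_addr, lir_patch_normal, access.patch_emit_info());
      access.clear_decorators(C1_NEEDS_PATCHING);
    } else {
      __ leal(addr_opr, resolved_addr);
    }
    return LIR_OprFact::address(new LIR_Address(resolved_addr, access.type()));
  }
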
 void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
   if (!access.is_oop()) {
     BarrierSetC1::load_at_resolved(access, result);
     return;
   }

@@ -208,11 +226,11 @@
   }
 
   if (ShenandoahLoadRefBarrier) {
     LIR_Opr tmp = gen->new_register(T_OBJECT);
     BarrierSetC1::load_at_resolved(access, tmp);
-    tmp = load_reference_barrier(access.gen(), tmp);
+    tmp = load_reference_barrier(access.gen(), tmp, access.resolved_addr());
     __ move(tmp, result);
   } else {
     BarrierSetC1::load_at_resolved(access, result);
   }
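
With this, the stub constructed in load_reference_barrier_impl() receives
the resolved address alongside the loaded oop, presumably so the slow path
can publish the forwarded oop back into the slot it was loaded from. The
implied companion change to the stub in shenandoahBarrierSetC1.hpp would
look roughly like this (field and accessor names assumed):

  class ShenandoahLoadReferenceBarrierStub: public CodeStub {
   private:
    LIR_Opr _obj;
    LIR_Opr _addr;    // new: address the oop was loaded from
    LIR_Opr _result;
    LIR_Opr _tmp1;
    LIR_Opr _tmp2;
   public:
    ShenandoahLoadReferenceBarrierStub(LIR_Opr obj, LIR_Opr addr, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2) :
      _obj(obj), _addr(addr), _result(result), _tmp1(tmp1), _tmp2(tmp2) {}
    LIR_Opr addr() const { return _addr; }
    // emit_code()/visit() omitted
  };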
 