src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp

@@ -171,11 +171,31 @@
    __ cbzw(tmp1, done);
    generate_pre_barrier_slow_path(masm, obj, pre_val, thread, tmp1, tmp2, done, runtime);
  
    __ bind(runtime);
  
-   __ push_call_clobbered_registers();
+   // save the live input values
+   RegSet saved = RegSet::of(pre_val);
+   FloatRegSet fsaved;
+ 
+   // Barriers might be emitted when converting between (scalarized) calling
+   // conventions for inline types. Save all argument registers before calling
+   // into the runtime.
+   if (EnableValhalla && InlineTypePassFieldsAsArgs) {
+     if (tosca_live) saved += RegSet::of(r0);
+     if (obj != noreg) saved += RegSet::of(obj);
+     saved += RegSet::of(j_rarg0, j_rarg1, j_rarg2, j_rarg3);
+     saved += RegSet::of(j_rarg4, j_rarg5, j_rarg6, j_rarg7);
+ 
+     fsaved += FloatRegSet::of(j_farg0, j_farg1, j_farg2, j_farg3);
+     fsaved += FloatRegSet::of(j_farg4, j_farg5, j_farg6, j_farg7);
+ 
+     __ push(saved, sp);
+     __ push_fp(fsaved, sp);
+   } else {
+     __ push_call_clobbered_registers();
+   }
  
    // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
    // that checks that the *(rfp+frame::interpreter_frame_last_sp) == nullptr.
    //

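For context, the slow path entered here implements G1's snapshot-at-the-beginning (SATB) pre-barrier: while concurrent marking is active, the previous value of the field is recorded before being overwritten. A minimal C++ model of the logic the assembler emits (illustrative only; thread_satb_active and satb_enqueue are hypothetical stand-ins for the per-thread marking-active flag and G1BarrierSetRuntime::write_ref_field_pre_entry):

    #include <cstdint>

    struct oop_t { uintptr_t bits; };

    extern bool thread_satb_active;    // models the flag tested by cbzw(tmp1, done)
    void satb_enqueue(oop_t pre_val);  // models write_ref_field_pre_entry

    void write_ref_field_pre(oop_t* field) {
      if (!thread_satb_active) return;   // fast path: marking is not active
      oop_t pre_val = *field;            // load the value about to be overwritten
      if (pre_val.bits != 0) {           // a null previous value needs no marking
        satb_enqueue(pre_val);           // slow path: hand the old value to the runtime
      }
    }
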
@@ -192,11 +212,16 @@
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
    }
  
-   __ pop_call_clobbered_registers();
+   if (EnableValhalla && InlineTypePassFieldsAsArgs) {
+     __ pop_fp(fsaved, sp);
+     __ pop(saved, sp);
+   } else {
+     __ pop_call_clobbered_registers();
+   }
  
    __ bind(done);
  
  }
  

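Both hunks above follow one pattern: build the save set incrementally, push it once before the runtime call, and pop it in reverse afterwards. Below is a standalone sketch of that pattern with a register set modeled as a plain bitmask; RegSet, FloatRegSet, push and pop are HotSpot-internal, so push_regs/pop_regs here are hypothetical stand-ins:

    #include <cstdint>
    #include <cstdio>

    // Toy model of HotSpot's RegSet: one bit per general-purpose register.
    struct RegMask {
      uint32_t bits = 0;
      RegMask& operator+=(RegMask o) { bits |= o.bits; return *this; }
      static RegMask of(int r) { RegMask m; m.bits = 1u << r; return m; }
    };

    // Hypothetical stand-ins for MacroAssembler::push/pop(RegSet, sp).
    void push_regs(RegMask s) {
      for (int r = 0; r < 32; r++)
        if (s.bits & (1u << r)) printf("str x%d, [sp, #-16]!\n", r);
    }
    void pop_regs(RegMask s) {
      for (int r = 31; r >= 0; r--)
        if (s.bits & (1u << r)) printf("ldr x%d, [sp], #16\n", r);
    }

    void call_runtime_with_saves(bool scalarized_args_live, int pre_val_reg) {
      RegMask saved = RegMask::of(pre_val_reg);   // always save the live input
      if (scalarized_args_live) {
        for (int r = 0; r <= 7; r++)              // j_rarg0..j_rarg7 may hold live
          saved += RegMask::of(r);                //   scalarized argument values
      }
      push_regs(saved);
      // ... call into the runtime here ...
      pop_regs(saved);                            // restore in reverse order
    }

The float registers get the same treatment through FloatRegSet, and the pops run in the opposite order of the pushes (fsaved is pushed last and popped first), keeping the stack balanced.
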
@@ -263,14 +288,30 @@
    // If card is young, jump to done
    __ br(Assembler::EQ, done);
    generate_post_barrier_slow_path(masm, thread, tmp1, tmp2, done, runtime);
  
    __ bind(runtime);
+ 
    // save the live input values
    RegSet saved = RegSet::of(store_addr);
+   FloatRegSet fsaved;
+ 
+   // Barriers might be emitted when converting between (scalarized) calling
+   // conventions for inline types. Save all argument registers before calling
+   // into the runtime.
+   if (EnableValhalla && InlineTypePassFieldsAsArgs) {
+     saved += RegSet::of(j_rarg0, j_rarg1, j_rarg2, j_rarg3);
+     saved += RegSet::of(j_rarg4, j_rarg5, j_rarg6, j_rarg7);
+ 
+     fsaved += FloatRegSet::of(j_farg0, j_farg1, j_farg2, j_farg3);
+     fsaved += FloatRegSet::of(j_farg4, j_farg5, j_farg6, j_farg7);
+   }
+ 
    __ push(saved, sp);
+   __ push_fp(fsaved, sp);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), tmp1, thread);
+   __ pop_fp(fsaved, sp);
    __ pop(saved, sp);
  
    __ bind(done);
  }
  

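Unlike the pre-barrier change, this hunk pushes saved and fsaved unconditionally. That stays correct when Valhalla is off only if pushing an empty set emits no instructions, which the toy model above makes explicit and which HotSpot's push/push_fp are assumed to match:

    RegMask fsaved;        // default-constructed: bits == 0, nothing selected
    push_regs(fsaved);     // emits no stores
    pop_regs(fsaved);      // emits no loads
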
@@ -390,44 +431,61 @@
    }
  }
  
  void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                           Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
+ 
+   bool in_heap = (decorators & IN_HEAP) != 0;
+   bool as_normal = (decorators & AS_NORMAL) != 0;
+   bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
+ 
+   bool needs_pre_barrier = as_normal && !dest_uninitialized;
+   bool needs_post_barrier = (val != noreg && in_heap);
+ 
+   assert_different_registers(val, tmp1, tmp2, tmp3);
+ 
    // flatten object address if needed
    if (dst.index() == noreg && dst.offset() == 0) {
      if (dst.base() != tmp3) {
        __ mov(tmp3, dst.base());
      }
    } else {
      __ lea(tmp3, dst);
    }
  
-   g1_write_barrier_pre(masm,
-                        tmp3 /* obj */,
-                        tmp2 /* pre_val */,
-                        rthread /* thread */,
-                        tmp1  /* tmp1 */,
-                        rscratch2  /* tmp2 */,
-                        val != noreg /* tosca_live */,
-                        false /* expand_call */);
+   if (needs_pre_barrier) {
+     g1_write_barrier_pre(masm,
+                          tmp3 /* obj */,
+                          tmp2 /* pre_val */,
+                          rthread /* thread */,
+                          tmp1  /* tmp1 */,
+                          rscratch2  /* tmp2 */,
+                          val != noreg /* tosca_live */,
+                          false /* expand_call */);
+   }
  
    if (val == noreg) {
      BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), noreg, noreg, noreg, noreg);
    } else {
      // G1 barrier needs uncompressed oop for region cross check.
      Register new_val = val;
-     if (UseCompressedOops) {
-       new_val = rscratch2;
-       __ mov(new_val, val);
+     if (needs_post_barrier) {
+       if (UseCompressedOops) {
+         new_val = rscratch2;
+         __ mov(new_val, val);
+       }
      }
+ 
      BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
-     g1_write_barrier_post(masm,
-                           tmp3 /* store_adr */,
-                           new_val /* new_val */,
-                           rthread /* thread */,
-                           tmp1 /* tmp1 */,
-                           tmp2 /* tmp2 */);
+     if (needs_post_barrier) {
+       g1_write_barrier_post(masm,
+                             tmp3 /* store_addr */,
+                             new_val /* new_val */,
+                             rthread /* thread */,
+                             tmp1 /* tmp1 */,
+                             tmp2 /* tmp2 */);
+     }
    }
  
  }
  
  #ifdef COMPILER1
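The needs_pre_barrier/needs_post_barrier flags introduced in the last hunk are plain bit tests on the decorator set: the pre barrier is skipped when the destination is uninitialized (there is no previous value to snapshot), and the post barrier is skipped when storing null (a null store cannot create a cross-region reference). A self-contained sketch of that computation follows; the decorator constants are illustrative, not HotSpot's actual values (the real ones live in oops/accessDecorators.hpp):

    #include <cstdint>

    using DecoratorSet = uint64_t;
    const DecoratorSet IN_HEAP               = DecoratorSet(1) << 0;
    const DecoratorSet AS_NORMAL             = DecoratorSet(1) << 1;
    const DecoratorSet IS_DEST_UNINITIALIZED = DecoratorSet(1) << 2;

    struct BarrierPlan { bool pre; bool post; };

    // val_is_null models 'val == noreg' (the store writes a null oop).
    BarrierPlan plan_barriers(DecoratorSet decorators, bool val_is_null) {
      bool in_heap            = (decorators & IN_HEAP) != 0;
      bool as_normal          = (decorators & AS_NORMAL) != 0;
      bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
      return BarrierPlan{ as_normal && !dest_uninitialized,   // pre barrier
                          !val_is_null && in_heap };          // post barrier
    }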