src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp
*** 153,11 ***
    __ str(pre_val, Address(tmp1, 0));
    __ b(done);
  
    __ bind(runtime);
  
!   __ push_call_clobbered_registers();
  
    // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
    // that checks that the *(rfp+frame::interpreter_frame_last_sp) == nullptr.
    //
--- 153,31 ---
    __ str(pre_val, Address(tmp1, 0));
    __ b(done);
  
    __ bind(runtime);
  
!   // save the live input values
+   RegSet saved = RegSet::of(pre_val);
+   FloatRegSet fsaved;
+ 
+   // Barriers might be emitted when converting between (scalarized) calling
+   // conventions for inline types. Save all argument registers before calling
+   // into the runtime.
+   if (EnableValhalla && InlineTypePassFieldsAsArgs) {
+     if (tosca_live) saved += RegSet::of(r0);
+     if (obj != noreg) saved += RegSet::of(obj);
+     saved += RegSet::of(j_rarg0, j_rarg1, j_rarg2, j_rarg3);
+     saved += RegSet::of(j_rarg4, j_rarg5, j_rarg6, j_rarg7);
+ 
+     fsaved += FloatRegSet::of(j_farg0, j_farg1, j_farg2, j_farg3);
+     fsaved += FloatRegSet::of(j_farg4, j_farg5, j_farg6, j_farg7);
+ 
+     __ push(saved, sp);
+     __ push_fp(fsaved, sp);
+   } else {
+     __ push_call_clobbered_registers();
+   }
  
    // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
    // that checks that the *(rfp+frame::interpreter_frame_last_sp) == nullptr.
    //
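A note on the register-saving change in this hunk: when Valhalla's scalarized calling convention is active, the barrier may be emitted inside calling-convention adapters where every Java argument register can carry a live value, so the save set is widened from just the live inputs to all j_rarg/j_farg registers. Below is a minimal standalone sketch of that set assembly, using a plain bit mask rather than HotSpot's RegSet/FloatRegSet classes; the flag values and register numbers are illustrative assumptions, not the real aarch64 numbering.

#include <cstdint>
#include <cstdio>

// Bit i set means "register ri is on the save list".
using RegMask = uint32_t;

static RegMask reg(int i) { return RegMask(1) << i; }

int main() {
  // Assumed flag values, just for the demo.
  const bool enable_valhalla     = true;
  const bool pass_fields_as_args = true;
  const bool tosca_live          = true;

  const int pre_val_reg = 19;                  // say pre_val was materialized in r19
  RegMask saved = reg(pre_val_reg);            // always save the live input value

  if (enable_valhalla && pass_fields_as_args) {
    if (tosca_live) saved |= reg(0);           // r0 may hold the TOS oop
    for (int i = 1; i <= 7; i++) {             // widen to all Java argument registers
      saved |= reg(i);                         // (numbering here is illustrative)
    }
  }

  int count = 0;
  for (int i = 0; i < 32; i++) count += (saved >> i) & 1;
  printf("save mask 0x%08x covers %d registers\n", (unsigned)saved, count);
  return 0;
}

The matching restore in the next hunk pops in the reverse order of the pushes (pop_fp before pop), so the conditional structure there has to mirror this one exactly.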

*** 174,11 ***
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
    }
  
!   __ pop_call_clobbered_registers();
  
    __ bind(done);
  
  }
  
--- 194,16 ---
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
    }
  
!   if (EnableValhalla && InlineTypePassFieldsAsArgs) {
+     __ pop_fp(fsaved, sp);
+     __ pop(saved, sp);
+   } else {
+     __ pop_call_clobbered_registers();
+   }
  
    __ bind(done);
  
  }
  

*** 214,10 ***
--- 239,12 ---
  
    __ cbz(new_val, done);
  
    // storing region crossing non-null, is card already dirty?
  
+   assert_different_registers(store_addr, thread, tmp1, tmp2, rscratch1);
+ 
    const Register card_addr = tmp1;
  
    __ lsr(card_addr, store_addr, CardTable::card_shift());
  
    // get the address of the card
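The lsr above is the first half of the card lookup: shifting the store address right by CardTable::card_shift() yields a card index, and adding the card table's (biased) byte-map base yields the address of the card byte that the following lines test and dirty. A standalone sketch with illustrative numbers; the 512-byte card size (shift of 9) and both addresses are assumptions for the demo.

#include <cstdint>
#include <cstdio>

int main() {
  const unsigned card_shift     = 9;                    // log2(card size), assuming 512-byte cards
  const uintptr_t byte_map_base = 0x0000700000000000;   // made-up biased card table base
  const uintptr_t store_addr    = 0x00000007c0123458;   // made-up heap address being stored to

  uintptr_t card_index = store_addr >> card_shift;      // __ lsr(card_addr, store_addr, card_shift)
  uintptr_t card_addr  = byte_map_base + card_index;    // "get the address of the card"

  printf("store 0x%016lx -> card index 0x%lx -> card byte at 0x%016lx\n",
         (unsigned long)store_addr, (unsigned long)card_index, (unsigned long)card_addr);
  return 0;
}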

*** 247,14 ***
--- 274,30 ---
    __ ldr(tmp2, buffer);
    __ str(card_addr, Address(tmp2, rscratch1));
    __ b(done);
  
    __ bind(runtime);
+ 
    // save the live input values
    RegSet saved = RegSet::of(store_addr);
+   FloatRegSet fsaved;
+ 
+   // Barriers might be emitted when converting between (scalarized) calling
+   // conventions for inline types. Save all argument registers before calling
+   // into the runtime.
+   if (EnableValhalla && InlineTypePassFieldsAsArgs) {
+     saved += RegSet::of(j_rarg0, j_rarg1, j_rarg2, j_rarg3);
+     saved += RegSet::of(j_rarg4, j_rarg5, j_rarg6, j_rarg7);
+ 
+     fsaved += FloatRegSet::of(j_farg0, j_farg1, j_farg2, j_farg3);
+     fsaved += FloatRegSet::of(j_farg4, j_farg5, j_farg6, j_farg7);
+   }
+ 
    __ push(saved, sp);
+   __ push_fp(fsaved, sp);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);
+   __ pop_fp(fsaved, sp);
    __ pop(saved, sp);
  
    __ bind(done);
  }
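The fast path just before the runtime label (load the buffer pointer, store card_addr at the current index, branch to done) enqueues the card into a per-thread dirty card queue; the runtime block is the queue-full slow path that ends in write_ref_field_post_entry. Here is a standalone sketch of that enqueue, under the assumption of a downward-counting byte index into a per-thread buffer; the struct and field names are made up for the sketch, not HotSpot's PtrQueue.

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct DirtyCardQueueSketch {
  uint8_t** buffer;   // per-thread queue storage
  size_t    index;    // byte offset of the next free slot, counts down to 0
};

// Returns true if enqueued on the fast path, false if the runtime call is needed.
static bool try_enqueue(DirtyCardQueueSketch* q, uint8_t* card_addr) {
  if (q->index == 0) return false;             // full: take the runtime slow path
  q->index -= sizeof(uint8_t*);                // claim a slot
  *reinterpret_cast<uint8_t**>(
      reinterpret_cast<char*>(q->buffer) + q->index) = card_addr;  // store card at buffer + index
  return true;
}

int main() {
  uint8_t* storage[4] = {};
  DirtyCardQueueSketch q = { storage, sizeof(storage) };
  uint8_t dummy_card = 0;
  while (try_enqueue(&q, &dummy_card)) { /* fast path */ }
  printf("queue filled, slow path would call write_ref_field_post_entry\n");
  return 0;
}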
  

*** 282,44 ***
    }
  }
  
  void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                           Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
    // flatten object address if needed
    if (dst.index() == noreg && dst.offset() == 0) {
      if (dst.base() != tmp3) {
        __ mov(tmp3, dst.base());
      }
    } else {
      __ lea(tmp3, dst);
    }
  
!   g1_write_barrier_pre(masm,
!                        tmp3 /* obj */,
!                        tmp2 /* pre_val */,
!                        rthread /* thread */,
!                        tmp1  /* tmp1 */,
!                        rscratch2  /* tmp2 */,
!                        val != noreg /* tosca_live */,
!                        false /* expand_call */);
  
    if (val == noreg) {
      BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), noreg, noreg, noreg, noreg);
    } else {
      // G1 barrier needs uncompressed oop for region cross check.
      Register new_val = val;
!     if (UseCompressedOops) {
!       new_val = rscratch2;
!       __ mov(new_val, val);
      }
      BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
!     g1_write_barrier_post(masm,
!                           tmp3 /* store_adr */,
!                           new_val /* new_val */,
!                           rthread /* thread */,
!                           tmp1 /* tmp1 */,
!                           tmp2 /* tmp2 */);
    }
  
  }
  
  #ifdef COMPILER1
--- 325,61 ---
    }
  }
  
  void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                           Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
+ 
+   bool in_heap = (decorators & IN_HEAP) != 0;
+   bool as_normal = (decorators & AS_NORMAL) != 0;
+   bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
+ 
+   bool needs_pre_barrier = as_normal && !dest_uninitialized;
+   bool needs_post_barrier = (val != noreg && in_heap);
+ 
+   assert_different_registers(val, tmp1, tmp2, tmp3);
+ 
    // flatten object address if needed
    if (dst.index() == noreg && dst.offset() == 0) {
      if (dst.base() != tmp3) {
        __ mov(tmp3, dst.base());
      }
    } else {
      __ lea(tmp3, dst);
    }
  
!   if (needs_pre_barrier) {
!     g1_write_barrier_pre(masm,
!                          tmp3 /* obj */,
!                          tmp2 /* pre_val */,
!                          rthread /* thread */,
!                          tmp1  /* tmp1 */,
!                          rscratch2  /* tmp2 */,
!                          val != noreg /* tosca_live */,
+                          false /* expand_call */);
+   }
  
    if (val == noreg) {
      BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), noreg, noreg, noreg, noreg);
    } else {
      // G1 barrier needs uncompressed oop for region cross check.
      Register new_val = val;
!     if (needs_post_barrier) {
!       if (UseCompressedOops) {
!         new_val = rscratch2;
+         __ mov(new_val, val);
+       }
      }
+ 
      BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
!     if (needs_post_barrier) {
!       g1_write_barrier_post(masm,
!                             tmp3 /* store_adr */,
!                             new_val /* new_val */,
!                             rthread /* thread */,
!                             tmp1 /* tmp1 */,
+                             tmp2 /* tmp2 */);
+     }
    }
  
  }
  
  #ifdef COMPILER1
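The decorator tests added to oop_store_at decide which halves of the G1 barrier get emitted: the SATB pre barrier only for normal stores into possibly initialized memory, and the post barrier only when an actual value is written into the heap. A standalone sketch of just that boolean logic; the bit positions and the modelled flags are illustrative stand-ins for HotSpot's DecoratorSet.

#include <cstdint>
#include <cstdio>

using DecoratorSet = uint64_t;
const DecoratorSet IN_HEAP              = 1u << 0;   // made-up bit positions
const DecoratorSet AS_NORMAL            = 1u << 1;
const DecoratorSet IS_DEST_UNINITIALIZED = 1u << 2;

int main() {
  const bool val_is_noreg = false;                   // a real oop is being stored
  DecoratorSet decorators = IN_HEAP | AS_NORMAL;     // a plain heap oop store

  bool in_heap            = (decorators & IN_HEAP) != 0;
  bool as_normal          = (decorators & AS_NORMAL) != 0;
  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  // Pre barrier: only for normal stores into possibly initialized memory
  // (the SATB snapshot has no use for the old value of uninitialized slots).
  bool needs_pre_barrier  = as_normal && !dest_uninitialized;
  // Post barrier: only when an actual value lands in the heap.
  bool needs_post_barrier = !val_is_noreg && in_heap;

  printf("pre barrier: %d, post barrier: %d\n", needs_pre_barrier, needs_post_barrier);
  return 0;
}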