
src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.cpp

*** 243,12 ***
    __ jcc(Assembler::equal, done);
    generate_pre_barrier_slow_path(masm, obj, pre_val, thread, tmp, done, runtime);
  
    __ bind(runtime);
  
!   // Determine and save the live input values
!   __ push_call_clobbered_registers();
  
    // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that *(ebp+frame::interpreter_frame_last_sp) == nullptr.
    //
--- 243,28 ---
    __ jcc(Assembler::equal, done);
    generate_pre_barrier_slow_path(masm, obj, pre_val, thread, tmp, done, runtime);
  
    __ bind(runtime);
  
!   if (EnableValhalla && InlineTypePassFieldsAsArgs) {
!     // Barriers might be emitted when converting between (scalarized) calling conventions for inline
+     // types. Save all argument registers before calling into the runtime.
+     // TODO: use push_set() (see JDK-8283327 push/pop_call_clobbered_registers & aarch64)
+     __ pusha();
+     __ subptr(rsp, 64);
+     __ movdbl(Address(rsp, 0),  j_farg0);
+     __ movdbl(Address(rsp, 8),  j_farg1);
+     __ movdbl(Address(rsp, 16), j_farg2);
+     __ movdbl(Address(rsp, 24), j_farg3);
+     __ movdbl(Address(rsp, 32), j_farg4);
+     __ movdbl(Address(rsp, 40), j_farg5);
+     __ movdbl(Address(rsp, 48), j_farg6);
+     __ movdbl(Address(rsp, 56), j_farg7);
+   } else {
+     // Determine and save the live input values
+     __ push_call_clobbered_registers();
+   }
  
    // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that *(ebp+frame::interpreter_frame_last_sp) == nullptr.
    //

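A note on the 64-byte figure above: the Java calling convention passes floating-point arguments in j_farg0..j_farg7 (xmm0..xmm7 on x86_64), and each movdbl fills one 8-byte slot, so j_fargN lands at offset N * 8. A minimal standalone sketch of that arithmetic (the constant names are illustrative, not HotSpot identifiers):

    #include <cstddef>

    // Illustrative constants, not HotSpot identifiers.
    constexpr std::size_t kFpArgRegs = 8;  // j_farg0..j_farg7 (xmm0..xmm7)
    constexpr std::size_t kSlotBytes = 8;  // movdbl spills a 64-bit double

    // One slot per register, so the scratch area is 8 * 8 = 64 bytes,
    // matching __ subptr(rsp, 64) in the slow path above.
    static_assert(kFpArgRegs * kSlotBytes == 64, "size of the FP spill area");

    int main() { return 0; }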
*** 276,11 ***
      __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), 2);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
    }
  
!   __ pop_call_clobbered_registers();
  
    __ bind(done);
  }
  
  static void generate_post_barrier_fast_path(MacroAssembler* masm,
--- 292,25 ---
      __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), 2);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
    }
  
!   if (EnableValhalla && InlineTypePassFieldsAsArgs) {
+     // Restore registers
+     __ movdbl(j_farg0, Address(rsp, 0));
+     __ movdbl(j_farg1, Address(rsp, 8));
+     __ movdbl(j_farg2, Address(rsp, 16));
+     __ movdbl(j_farg3, Address(rsp, 24));
+     __ movdbl(j_farg4, Address(rsp, 32));
+     __ movdbl(j_farg5, Address(rsp, 40));
+     __ movdbl(j_farg6, Address(rsp, 48));
+     __ movdbl(j_farg7, Address(rsp, 56));
+     __ addptr(rsp, 64);
+     __ popa();
+   } else {
+     __ pop_call_clobbered_registers();
+   }
  
    __ bind(done);
  }
  
  static void generate_post_barrier_fast_path(MacroAssembler* masm,

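The restore side must mirror the save exactly: each j_fargN is refilled from the same rsp-relative slot it was spilled to, the 64-byte area is released with addptr only after all refills, and popa runs last so that rsp again points at the frame pusha wrote. An illustrative check of those offsets (a model, not HotSpot code):

    #include <cassert>

    // Model of the refill offsets: j_fargN is reloaded from rsp + N*8,
    // which must lie inside the 64-byte area reserved by subptr(rsp, 64).
    int main() {
      for (int n = 0; n < 8; n++) {
        int offset = n * 8;                // Address(rsp, offset) in the patch
        assert(offset >= 0 && offset + 8 <= 64);
      }
      return 0;
    }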
*** 348,15 ***
    // If card is young, jump to done
    __ jcc(Assembler::equal, done);
    generate_post_barrier_slow_path(masm, thread, tmp, tmp2, done, runtime);
  
    __ bind(runtime);
!   // save the live input values
!   RegSet saved = RegSet::of(store_addr NOT_LP64(COMMA thread));
!   __ push_set(saved);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), tmp, thread);
!   __ pop_set(saved);
  
    __ bind(done);
  }
  
  #if defined(COMPILER2)
--- 378,37 ---
    // If card is young, jump to done
    __ jcc(Assembler::equal, done);
    generate_post_barrier_slow_path(masm, thread, tmp, tmp2, done, runtime);
  
    __ bind(runtime);
!   // Barriers might be emitted when converting between (scalarized) calling conventions for inline
!   // types. Save all argument registers before calling into the runtime.
!   // TODO: use push_set() (see JDK-8283327 push/pop_call_clobbered_registers & aarch64)
+   __ pusha();
+   __ subptr(rsp, 64);
+   __ movdbl(Address(rsp, 0),  j_farg0);
+   __ movdbl(Address(rsp, 8),  j_farg1);
+   __ movdbl(Address(rsp, 16), j_farg2);
+   __ movdbl(Address(rsp, 24), j_farg3);
+   __ movdbl(Address(rsp, 32), j_farg4);
+   __ movdbl(Address(rsp, 40), j_farg5);
+   __ movdbl(Address(rsp, 48), j_farg6);
+   __ movdbl(Address(rsp, 56), j_farg7);
+ 
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), tmp, thread);
! 
+   // Restore registers
+   __ movdbl(j_farg0, Address(rsp, 0));
+   __ movdbl(j_farg1, Address(rsp, 8));
+   __ movdbl(j_farg2, Address(rsp, 16));
+   __ movdbl(j_farg3, Address(rsp, 24));
+   __ movdbl(j_farg4, Address(rsp, 32));
+   __ movdbl(j_farg5, Address(rsp, 40));
+   __ movdbl(j_farg6, Address(rsp, 48));
+   __ movdbl(j_farg7, Address(rsp, 56));
+   __ addptr(rsp, 64);
+   __ popa();
  
    __ bind(done);
  }
  
  #if defined(COMPILER2)

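With this hunk the same spill/refill sequence appears in both the pre- and post-barrier slow paths. Until the push_set() cleanup from the TODO lands, a helper pair could factor out the duplication. The sketch below is hypothetical, not part of the patch: the helper names are invented, and it assumes j_farg0..j_farg7 map to xmm0..xmm7 as on x86_64, so only MacroAssembler calls the patch itself uses appear:

    #include "asm/macroAssembler.hpp"

    #define __ masm->

    // Hypothetical helpers (invented names, not existing HotSpot API).
    static void push_java_fp_args(MacroAssembler* masm) {
      __ subptr(rsp, 64);
      for (int i = 0; i < 8; i++) {
        __ movdbl(Address(rsp, i * 8), as_XMMRegister(i));  // j_fargN, assuming xmmN
      }
    }

    static void pop_java_fp_args(MacroAssembler* masm) {
      for (int i = 0; i < 8; i++) {
        __ movdbl(as_XMMRegister(i), Address(rsp, i * 8));
      }
      __ addptr(rsp, 64);
    }

    #undef __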
*** 462,12 ***
  
  void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                           Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
    bool in_heap = (decorators & IN_HEAP) != 0;
    bool as_normal = (decorators & AS_NORMAL) != 0;
  
!   bool needs_pre_barrier = as_normal;
    bool needs_post_barrier = val != noreg && in_heap;
  
    Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
    // flatten object address if needed
    // We do it regardless of precise because we need the registers
--- 514,13 ---
  
  void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                           Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
    bool in_heap = (decorators & IN_HEAP) != 0;
    bool as_normal = (decorators & AS_NORMAL) != 0;
+   bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
  
!   bool needs_pre_barrier = as_normal && !dest_uninitialized;
    bool needs_post_barrier = val != noreg && in_heap;
  
    Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
    // flatten object address if needed
    // We do it regardless of precise because we need the registers
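The added dest_uninitialized test means stores into freshly allocated, not-yet-published memory skip the SATB pre-barrier: there is no previous value for concurrent marking to enqueue. A standalone model of the decorator logic (the bit positions are made up, not HotSpot's real DecoratorSet encoding):

    #include <cassert>
    #include <cstdint>

    // Illustrative bit values only, not HotSpot's DecoratorSet encoding.
    using DecoratorSet = uint64_t;
    constexpr DecoratorSet AS_NORMAL             = 1ULL << 0;
    constexpr DecoratorSet IS_DEST_UNINITIALIZED = 1ULL << 1;

    // Mirrors the patched condition in oop_store_at.
    bool needs_pre_barrier(DecoratorSet d) {
      bool as_normal          = (d & AS_NORMAL) != 0;
      bool dest_uninitialized = (d & IS_DEST_UNINITIALIZED) != 0;
      return as_normal && !dest_uninitialized;
    }

    int main() {
      assert( needs_pre_barrier(AS_NORMAL));
      assert(!needs_pre_barrier(AS_NORMAL | IS_DEST_UNINITIALIZED));
      return 0;
    }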