
src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.cpp

*** 208,11 ***
    __ jcc(Assembler::equal, done);
  
    // Can we store original value in the thread's buffer?
    // Is index == 0?
    // (The index field is typed as size_t.)
- 
    __ movptr(tmp, index);                   // tmp := *index_adr
    __ cmpptr(tmp, 0);                       // tmp == 0?
    __ jcc(Assembler::equal, runtime);       // If yes, goto runtime
  
    __ subptr(tmp, wordSize);                // tmp := tmp - wordSize
--- 208,10 ---

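For context, the fast path surrounding this hunk checks whether the current thread's SATB buffer still has room: the index field counts down in bytes, and index == 0 means the buffer is full and the runtime slow path must be taken. The following is a minimal C++ sketch of that logic with illustrative names (SatbQueueModel and try_enqueue are not HotSpot types):

  #include <cstddef>

  // Sketch only: models the generated fast path, not actual HotSpot code.
  struct SatbQueueModel {
    std::size_t index;   // byte offset of the next free slot, counting down to 0
    char*       buffer;  // per-thread SATB buffer
  };

  // Returns true if pre_val fit into the buffer; false means the slow path
  // (G1BarrierSetRuntime::write_ref_field_pre_entry) would have to run.
  static bool try_enqueue(SatbQueueModel& q, void* pre_val) {
    if (q.index == 0) {
      return false;                                            // cmpptr(tmp, 0); jcc(equal, runtime)
    }
    q.index -= sizeof(void*);                                  // subptr(tmp, wordSize)
    *reinterpret_cast<void**>(q.buffer + q.index) = pre_val;   // movptr(Address(tmp, 0), pre_val)
    return true;
  }
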
*** 223,12 ***
    __ movptr(Address(tmp, 0), pre_val);
    __ jmp(done);
  
    __ bind(runtime);
  
!   // Determine and save the live input values
!   __ push_call_clobbered_registers();
  
    // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
    // that checks that the *(ebp+frame::interpreter_frame_last_sp) == nullptr.
    //
--- 222,28 ---
    __ movptr(Address(tmp, 0), pre_val);
    __ jmp(done);
  
    __ bind(runtime);
  
!   if (EnableValhalla && InlineTypePassFieldsAsArgs) {
!     // Barriers might be emitted when converting between (scalarized) calling conventions for inline
+     // types. Save all argument registers before calling into the runtime.
+     // TODO: use push_set() (see JDK-8283327 push/pop_call_clobbered_registers & aarch64)
+     __ pusha();
+     __ subptr(rsp, 64);
+     __ movdbl(Address(rsp, 0),  j_farg0);
+     __ movdbl(Address(rsp, 8),  j_farg1);
+     __ movdbl(Address(rsp, 16), j_farg2);
+     __ movdbl(Address(rsp, 24), j_farg3);
+     __ movdbl(Address(rsp, 32), j_farg4);
+     __ movdbl(Address(rsp, 40), j_farg5);
+     __ movdbl(Address(rsp, 48), j_farg6);
+     __ movdbl(Address(rsp, 56), j_farg7);
+   } else {
+     // Determine and save the live input values
+     __ push_call_clobbered_registers();
+   }
  
    // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
    // that checks that the *(ebp+frame::interpreter_frame_last_sp) == nullptr.
    //

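The added save sequence reserves a 64-byte scratch area below the GPRs pushed by pusha() and spills the eight XMM argument registers j_farg0..j_farg7 into consecutive 8-byte slots. A small compile-time sketch of that layout (XmmArgSpillArea and spill_offset are illustrative, not HotSpot names):

  #include <cstddef>

  struct XmmArgSpillArea {
    double slot[8];   // slot[N] holds j_fargN at byte offset N * 8
  };

  constexpr std::size_t spill_offset(int n) { return n * sizeof(double); }

  static_assert(sizeof(XmmArgSpillArea) == 64, "matches subptr(rsp, 64)");
  static_assert(spill_offset(0) == 0 && spill_offset(7) == 56,
                "matches movdbl(Address(rsp, 0), j_farg0) ... movdbl(Address(rsp, 56), j_farg7)");

The matching restore in the next hunk reloads the same offsets in the same order before releasing the 64 bytes and popping the GPRs.
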
*** 256,11 ***
      __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), 2);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
    }
  
!   __ pop_call_clobbered_registers();
  
    __ bind(done);
  }
  
  void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
--- 271,25 ---
      __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), 2);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
    }
  
!   if (EnableValhalla && InlineTypePassFieldsAsArgs) {
+     // Restore registers
+     __ movdbl(j_farg0, Address(rsp, 0));
+     __ movdbl(j_farg1, Address(rsp, 8));
+     __ movdbl(j_farg2, Address(rsp, 16));
+     __ movdbl(j_farg3, Address(rsp, 24));
+     __ movdbl(j_farg4, Address(rsp, 32));
+     __ movdbl(j_farg5, Address(rsp, 40));
+     __ movdbl(j_farg6, Address(rsp, 48));
+     __ movdbl(j_farg7, Address(rsp, 56));
+     __ addptr(rsp, 64);
+     __ popa();
+   } else {
+     __ pop_call_clobbered_registers();
+   }
  
    __ bind(done);
  }
  
  void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,

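The save in the previous hunk and the restore above are guarded by the same EnableValhalla && InlineTypePassFieldsAsArgs condition and must stay in sync. One way to make that pairing explicit would be an RAII-style helper; the sketch below only illustrates the idea with a hypothetical type, it is not how the webrev implements it:

  // Hypothetical pairing helper, not HotSpot code: the constructor would emit
  // the save sequence and the destructor the matching restore, so a single
  // condition selects both halves.
  struct SavedArgRegsScope {
    bool save_all_args;   // EnableValhalla && InlineTypePassFieldsAsArgs
    explicit SavedArgRegsScope(bool scalarized_args) : save_all_args(scalarized_args) {
      // save_all_args ? pusha() + XMM spills : push_call_clobbered_registers()
    }
    ~SavedArgRegsScope() {
      // save_all_args ? XMM reloads + popa() : pop_call_clobbered_registers()
    }
  };
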
*** 329,25 ***
    __ addptr(tmp2, buffer);
    __ movptr(Address(tmp2, 0), card_addr);
    __ jmp(done);
  
    __ bind(runtime);
!   // save the live input values
!   RegSet saved = RegSet::of(store_addr NOT_LP64(COMMA thread));
!   __ push_set(saved);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);
!   __ pop_set(saved);
  
    __ bind(done);
  }
  
  void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                           Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
    bool in_heap = (decorators & IN_HEAP) != 0;
    bool as_normal = (decorators & AS_NORMAL) != 0;
  
!   bool needs_pre_barrier = as_normal;
    bool needs_post_barrier = val != noreg && in_heap;
  
    Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
    // flatten object address if needed
    // We do it regardless of precise because we need the registers
--- 358,54 ---
    __ addptr(tmp2, buffer);
    __ movptr(Address(tmp2, 0), card_addr);
    __ jmp(done);
  
    __ bind(runtime);
!   // Barriers might be emitted when converting between (scalarized) calling conventions for inline
!   // types. Save all argument registers before calling into the runtime.
!   // TODO: use push_set() (see JDK-8283327 push/pop_call_clobbered_registers & aarch64)
+   __ pusha();
+   __ subptr(rsp, 64);
+   __ movdbl(Address(rsp, 0),  j_farg0);
+   __ movdbl(Address(rsp, 8),  j_farg1);
+   __ movdbl(Address(rsp, 16), j_farg2);
+   __ movdbl(Address(rsp, 24), j_farg3);
+   __ movdbl(Address(rsp, 32), j_farg4);
+   __ movdbl(Address(rsp, 40), j_farg5);
+   __ movdbl(Address(rsp, 48), j_farg6);
+   __ movdbl(Address(rsp, 56), j_farg7);
+ 
+ #ifdef _LP64
+   __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, r15_thread);
+ #else
+   __ push(thread);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);
!   __ pop(thread);
+ #endif
+ 
+   // Restore registers
+   __ movdbl(j_farg0, Address(rsp, 0));
+   __ movdbl(j_farg1, Address(rsp, 8));
+   __ movdbl(j_farg2, Address(rsp, 16));
+   __ movdbl(j_farg3, Address(rsp, 24));
+   __ movdbl(j_farg4, Address(rsp, 32));
+   __ movdbl(j_farg5, Address(rsp, 40));
+   __ movdbl(j_farg6, Address(rsp, 48));
+   __ movdbl(j_farg7, Address(rsp, 56));
+   __ addptr(rsp, 64);
+   __ popa();
  
    __ bind(done);
  }
  
  void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                           Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
    bool in_heap = (decorators & IN_HEAP) != 0;
    bool as_normal = (decorators & AS_NORMAL) != 0;
+   bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
  
!   bool needs_pre_barrier = as_normal && !dest_uninitialized;
    bool needs_post_barrier = val != noreg && in_heap;
  
    Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
    // flatten object address if needed
    // We do it regardless of precise because we need the registers
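The tail of this hunk also changes the decorator handling in oop_store_at: a store into uninitialized memory (IS_DEST_UNINITIALIZED) has no previous value to record, so the pre-barrier is skipped, and a null store (val == noreg) still needs no post-barrier. A plain C++ sketch of that decision logic, with placeholder bit values standing in for HotSpot's real DecoratorSet constants:

  #include <cstdint>

  typedef std::uint64_t DecoratorSet;
  const DecoratorSet IN_HEAP               = 1 << 0;  // placeholder bit values
  const DecoratorSet AS_NORMAL             = 1 << 1;
  const DecoratorSet IS_DEST_UNINITIALIZED = 1 << 2;

  struct BarrierPlan { bool pre; bool post; };

  static BarrierPlan plan_barriers(DecoratorSet decorators, bool storing_non_null) {
    bool in_heap            = (decorators & IN_HEAP) != 0;
    bool as_normal          = (decorators & AS_NORMAL) != 0;
    bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
    BarrierPlan plan;
    plan.pre  = as_normal && !dest_uninitialized;   // nothing old to record in a fresh destination
    plan.post = storing_non_null && in_heap;        // val == noreg (a null store) needs no card mark
    return plan;
  }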