src/hotspot/cpu/x86/macroAssembler_x86.cpp

*** 925,36 ***
  static int reg2offset_out(VMReg r) {
    return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
  }
  
  // A long move
! void MacroAssembler::long_move(VMRegPair src, VMRegPair dst) {
  
    // The calling conventions assures us that each VMregpair is either
    // all really one physical register or adjacent stack slots.
  
    if (src.is_single_phys_reg() ) {
      if (dst.is_single_phys_reg()) {
        if (dst.first() != src.first()) {
          mov(dst.first()->as_Register(), src.first()->as_Register());
        }
      } else {
!       assert(dst.is_single_reg(), "not a stack pair");
!       movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
      }
    } else if (dst.is_single_phys_reg()) {
      assert(src.is_single_reg(),  "not a stack pair");
!     movq(dst.first()->as_Register(), Address(rbp, reg2offset_out(src.first())));
    } else {
      assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
!     movq(rax, Address(rbp, reg2offset_in(src.first())));
!     movq(Address(rsp, reg2offset_out(dst.first())), rax);
    }
  }
  
  // A double move
! void MacroAssembler::double_move(VMRegPair src, VMRegPair dst) {
  
    // The calling conventions assures us that each VMregpair is either
    // all really one physical register or adjacent stack slots.
  
    if (src.is_single_phys_reg() ) {
--- 925,37 ---
  static int reg2offset_out(VMReg r) {
    return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
  }
  
  // A long move
! void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  
    // The calling conventions assures us that each VMregpair is either
    // all really one physical register or adjacent stack slots.
  
    if (src.is_single_phys_reg() ) {
      if (dst.is_single_phys_reg()) {
        if (dst.first() != src.first()) {
          mov(dst.first()->as_Register(), src.first()->as_Register());
        }
      } else {
!       assert(dst.is_single_reg(), "not a stack pair: (%s, %s), (%s, %s)",
!        src.first()->name(), src.second()->name(), dst.first()->name(), dst.second()->name());
+       movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
      }
    } else if (dst.is_single_phys_reg()) {
      assert(src.is_single_reg(),  "not a stack pair");
!     movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    } else {
      assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
!     movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
!     movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    }
  }
  
  // A double move
! void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  
    // The calling conventions assures us that each VMregpair is either
    // all really one physical register or adjacent stack slots.
  
    if (src.is_single_phys_reg() ) {
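
The hunk above extends long_move() and double_move() with a caller-supplied scratch register (tmp, replacing the hard-coded rax) and two stack biases that are simply added to the computed rbp/rsp offsets. The following is a minimal standalone sketch of that offset arithmetic, not HotSpot code; the constants (4-byte stack slots, incoming args 16 bytes above rbp past the saved rbp and return address, no preserved outgoing slots) and the sample bias values are x86_64 assumptions for illustration only.

#include <cstdio>

static const int stack_slot_size = 4;      // VMRegImpl::stack_slot_size (assumed)

static int reg2offset_in(int reg2stack_slot) {
  // Incoming args live above the saved rbp and return address (4 slots of 4 bytes).
  return (reg2stack_slot + 4) * stack_slot_size;
}

static int reg2offset_out(int reg2stack_slot) {
  // Outgoing args start right at rsp on x86_64 (no preserved out slots assumed).
  return reg2stack_slot * stack_slot_size;
}

int main() {
  int src_slot = 0, dst_slot = 2;
  int in_stk_bias = 8, out_stk_bias = 0;   // hypothetical biases picked by a caller
  // Stack-to-stack long move: both the load and the store now use biased offsets.
  printf("movq tmp, Address(rbp, %d)\n", reg2offset_in(src_slot) + in_stk_bias);
  printf("movq Address(rsp, %d), tmp\n", reg2offset_out(dst_slot) + out_stk_bias);
  return 0;
}
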

*** 963,43 ***
        if ( src.first() != dst.first()) {
          movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
        }
      } else {
        assert(dst.is_single_reg(), "not a stack pair");
!       movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
      }
    } else if (dst.is_single_phys_reg()) {
      assert(src.is_single_reg(),  "not a stack pair");
!     movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_out(src.first())));
    } else {
      assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
!     movq(rax, Address(rbp, reg2offset_in(src.first())));
!     movq(Address(rsp, reg2offset_out(dst.first())), rax);
    }
  }
  
  
  // A float arg may have to do float reg int reg conversion
! void MacroAssembler::float_move(VMRegPair src, VMRegPair dst) {
    assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
  
    // The calling conventions assures us that each VMregpair is either
    // all really one physical register or adjacent stack slots.
  
    if (src.first()->is_stack()) {
      if (dst.first()->is_stack()) {
!       movl(rax, Address(rbp, reg2offset_in(src.first())));
!       movptr(Address(rsp, reg2offset_out(dst.first())), rax);
      } else {
        // stack to reg
        assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
!       movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first())));
      }
    } else if (dst.first()->is_stack()) {
      // reg to stack
      assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
!     movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
    } else {
      // reg to reg
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        movdbl(dst.first()->as_XMMRegister(),  src.first()->as_XMMRegister());
--- 964,43 ---
        if ( src.first() != dst.first()) {
          movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
        }
      } else {
        assert(dst.is_single_reg(), "not a stack pair");
!       movdbl(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
      }
    } else if (dst.is_single_phys_reg()) {
      assert(src.is_single_reg(),  "not a stack pair");
!     movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    } else {
      assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
!     movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
!     movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    }
  }
  
  
  // A float arg may have to do float reg int reg conversion
! void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
    assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
  
    // The calling conventions assures us that each VMregpair is either
    // all really one physical register or adjacent stack slots.
  
    if (src.first()->is_stack()) {
      if (dst.first()->is_stack()) {
!       movl(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
!       movptr(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
      } else {
        // stack to reg
        assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
!       movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      }
    } else if (dst.first()->is_stack()) {
      // reg to stack
      assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
!     movflt(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
    } else {
      // reg to reg
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        movdbl(dst.first()->as_XMMRegister(),  src.first()->as_XMMRegister());
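
float_move() gets the same treatment in this hunk: the biases are folded into the rbp/rsp addresses, and the stack-to-stack case now bounces through the caller-supplied tmp instead of the previously hard-coded rax, presumably so a caller with a live value in rax is not clobbered. Below is a plain C++ sketch of that bounce on ordinary memory; it is illustrative only and uses no HotSpot APIs.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Emulates: movl tmp, Address(rbp, ...); movptr Address(rsp, ...), tmp
static void float_move_stack_to_stack(const uint8_t* src_slot, uint8_t* dst_slot) {
  uint32_t tmp;                               // stands in for the scratch Register
  std::memcpy(&tmp, src_slot, sizeof tmp);    // 32-bit load (movl)
  uint64_t word = tmp;                        // movl zero-extends into the 64-bit register
  std::memcpy(dst_slot, &word, sizeof word);  // 64-bit store (movptr on x86_64)
}

int main() {
  float f = 1.5f;
  uint8_t src[4], dst[8] = {0};
  std::memcpy(src, &f, sizeof f);
  float_move_stack_to_stack(src, dst);
  float out;
  std::memcpy(&out, dst, sizeof out);
  printf("%f\n", out);                        // prints 1.500000
  return 0;
}
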

*** 1009,25 ***
  
  // On 64 bit we will store integer like items to the stack as
  // 64 bits items (x86_32/64 abi) even though java would only store
  // 32bits for a parameter. On 32bit it will simply be 32 bits
  // So this routine will do 32->32 on 32bit and 32->64 on 64bit
! void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst) {
    if (src.first()->is_stack()) {
      if (dst.first()->is_stack()) {
        // stack to stack
!       movslq(rax, Address(rbp, reg2offset_in(src.first())));
!       movq(Address(rsp, reg2offset_out(dst.first())), rax);
      } else {
        // stack to reg
!       movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
      }
    } else if (dst.first()->is_stack()) {
      // reg to stack
      // Do we really have to sign extend???
      // __ movslq(src.first()->as_Register(), src.first()->as_Register());
!     movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
    } else {
      // Do we really have to sign extend???
      // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
      if (dst.first() != src.first()) {
        movq(dst.first()->as_Register(), src.first()->as_Register());
--- 1010,25 ---
  
  // On 64 bit we will store integer like items to the stack as
  // 64 bits items (x86_32/64 abi) even though java would only store
  // 32bits for a parameter. On 32bit it will simply be 32 bits
  // So this routine will do 32->32 on 32bit and 32->64 on 64bit
! void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
    if (src.first()->is_stack()) {
      if (dst.first()->is_stack()) {
        // stack to stack
!       movslq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
!       movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
      } else {
        // stack to reg
!       movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      }
    } else if (dst.first()->is_stack()) {
      // reg to stack
      // Do we really have to sign extend???
      // __ movslq(src.first()->as_Register(), src.first()->as_Register());
!     movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
    } else {
      // Do we really have to sign extend???
      // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
      if (dst.first() != src.first()) {
        movq(dst.first()->as_Register(), src.first()->as_Register());
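
move32_64() likewise gains tmp and the two biases; its stack-to-stack path still widens with movslq so a 32-bit Java int fills the full 64-bit outgoing slot. A tiny standalone model of that sign-extending widen (plain C++, not HotSpot code):

#include <cstdint>
#include <cstdio>

static int64_t movslq(int32_t src) {
  return static_cast<int64_t>(src);            // sign-extending 32->64 widen
}

int main() {
  int32_t arg = -7;
  int64_t slot = movslq(arg);                  // value written into the 64-bit out slot
  printf("%lld\n", static_cast<long long>(slot));  // prints -7
  return 0;
}
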