
src/hotspot/cpu/x86/macroAssembler_x86.cpp

@@ -518,17 +518,19 @@
    jcc(Assembler::zero, L);
  
    subq(rsp, 8);
    {
      call(RuntimeAddress(entry_point));
+     oopmap_metadata(-1);
    }
    addq(rsp, 8);
    jmp(E);
  
    bind(L);
    {
      call(RuntimeAddress(entry_point));
+     oopmap_metadata(-1);
    }
  
    bind(E);
  
  #ifdef _WIN64

@@ -816,10 +818,19 @@
    pop_CPU_state();
    mov(rsp, rbp);
    pop(rbp);
  }
  
+ void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) {
+ #ifdef ASSERT
+   Label OK;
+   jcc(cc, OK);
+   stop(msg);
+   bind(OK);
+ #endif
+ }
+ 
  void MacroAssembler::print_state() {
    address rip = pc();
    pusha();            // get regs on stack
    push(rbp);
    movq(rbp, rsp);

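Note: _assert_asm emits a debug-only inline check: if the condition codes do not satisfy cc, control falls through to stop(msg), which prints the message and halts the VM; in product builds the method expands to nothing. A minimal usage sketch (illustrative only, not part of this change):

    // Debug-only check that rax and rbx hold the same value.
    cmpptr(rax, rbx);
    _assert_asm(Assembler::equal, "rax and rbx expected to match");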
@@ -1129,10 +1140,15 @@
  
  #endif // _LP64
  
  // Now versions that are common to 32/64 bit
  
+ void MacroAssembler::oopmap_metadata(int index) {
+   // if (index != -1) tty->print_cr("oopmap_metadata %d", index);
+   // mov64(r10, 1234); // TODO: Add a new relocInfo with external semantics. see relocInfo::metadata_type
+ }
+ 
  void MacroAssembler::addptr(Register dst, int32_t imm32) {
    LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
  }
  
  void MacroAssembler::addptr(Register dst, Register src) {

@@ -1191,10 +1207,30 @@
    if (target % modulus != 0) {
      nop(modulus - (target % modulus));
    }
  }
  
+ void MacroAssembler::push_f(XMMRegister r) {
+   subptr(rsp, wordSize);
+   movflt(Address(rsp, 0), r);
+ }
+ 
+ void MacroAssembler::pop_f(XMMRegister r) {
+   movflt(r, Address(rsp, 0));
+   addptr(rsp, wordSize);
+ }
+ 
+ void MacroAssembler::push_d(XMMRegister r) {
+   subptr(rsp, 2 * wordSize);
+   movdbl(Address(rsp, 0), r);
+ }
+ 
+ void MacroAssembler::pop_d(XMMRegister r) {
+   movdbl(r, Address(rsp, 0));
+   addptr(rsp, 2 * Interpreter::stackElementSize);
+ }
+ 
  void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register scratch_reg) {
    // Used in sign-masking with aligned address.
    assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
    if (reachable(src)) {
      Assembler::andpd(dst, as_Address(src));

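Note: the new push_f/pop_f and push_d/pop_d helpers spill an XMM register to the top of the stack and restore it, adjusting rsp by one word for a float and two words for a double. A minimal sketch of the intended use (illustrative only):

    // Preserve xmm0 across a sequence that may clobber it.
    push_d(xmm0);
    // ... code that may overwrite xmm0 ...
    pop_d(xmm0);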
@@ -1648,10 +1684,24 @@
    pass_arg1(this, arg_1);
    pass_arg0(this, arg_0);
    call_VM_leaf(entry_point, 3);
  }
  
+ void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
+   LP64_ONLY(assert(arg_0 != c_rarg3, "smashed arg"));
+   LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
+   LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
+   pass_arg3(this, arg_3);
+   LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
+   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
+   pass_arg2(this, arg_2);
+   LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
+   pass_arg1(this, arg_1);
+   pass_arg0(this, arg_0);
+   call_VM_leaf(entry_point, 4);
+ }
+ 
  void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
    pass_arg0(this, arg_0);
    MacroAssembler::call_VM_leaf_base(entry_point, 1);
  }
  

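Note: the new four-register overload of call_VM_leaf moves the arguments into the C calling-convention registers in reverse order, and the debug asserts verify that no earlier move can smash a still-pending source. A hedged usage sketch, with the entry point named here purely for illustration:

    // Values already sit in c_rarg0..c_rarg3, so no shuffle clobbers a
    // pending source and none of the "smashed arg" asserts can fire.
    call_VM_leaf(CAST_FROM_FN_PTR(address, SomeRuntime::some_leaf_helper),
                 c_rarg0, c_rarg1, c_rarg2, c_rarg3);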
@@ -1967,20 +2017,28 @@
  void MacroAssembler::enter() {
    push(rbp);
    mov(rbp, rsp);
  }
  
+ void MacroAssembler::post_call_nop() {
+   emit_int8((int8_t)0x0f);
+   emit_int8((int8_t)0x1f);
+   emit_int8((int8_t)0x84);
+   emit_int8((int8_t)0x00);
+   emit_int32(0x00);
+ }
+ 
  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void MacroAssembler::fat_nop() {
    if (UseAddressNop) {
      addr_nop_5();
    } else {
-     emit_int8(0x26); // es:
-     emit_int8(0x2e); // cs:
-     emit_int8(0x64); // fs:
-     emit_int8(0x65); // gs:
-     emit_int8((unsigned char)0x90);
+     emit_int8((int8_t)0x26); // es:
+     emit_int8((int8_t)0x2e); // cs:
+     emit_int8((int8_t)0x64); // fs:
+     emit_int8((int8_t)0x65); // gs:
+     emit_int8((int8_t)0x90);
    }
  }
  
  #ifndef _LP64
  void MacroAssembler::fcmp(Register tmp) {

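Note: post_call_nop emits eight bytes that decode to the canonical 8-byte multi-byte NOP, presumably to leave a fixed-size, recognizable (and potentially patchable) slot immediately after call sites:

    // Emitted byte sequence (8 bytes total):
    //   0F 1F 84 00 00 00 00 00      nopl 0x0(%rax,%rax,1)
    // i.e. two-byte opcode 0F 1F, ModRM 0x84 (memory form with SIB and a
    // 32-bit displacement), a zero SIB byte and a zero displacement.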
@@ -2673,10 +2731,65 @@
      lea(rscratch1, src);
      Assembler::movdqa(dst, Address(rscratch1, 0));
    }
  }
  
+ #ifdef _LP64
+   // Move Aligned, possibly non-temporal
+   void MacroAssembler::movqa(Address dst, Register src, bool nt) {
+     if (nt) {
+       Assembler::movntq(dst, src);
+     } else {
+       Assembler::movq(dst, src);
+     }
+   }
+ 
+   void MacroAssembler::movdqa(Address dst, XMMRegister src, bool nt) {
+     if (nt) {
+       Assembler::movntdq(dst, src);
+     } else {
+       Assembler::movdqu(dst, src);
+     }
+   }
+   void MacroAssembler::vmovdqa(Address dst, XMMRegister src, bool nt) {
+     if (nt) {
+       Assembler::vmovntdq(dst, src);
+     } else {
+       Assembler::vmovdqu(dst, src);
+     }
+   }
+   void MacroAssembler::evmovdqa(Address dst, XMMRegister src, int vector_len, bool nt) {
+     if (nt) {
+       Assembler::evmovntdq(dst, src, vector_len);
+     } else {
+       Assembler::evmovdqal(dst, src, vector_len);
+     }
+   }
+ 
+   void MacroAssembler::movdqa(XMMRegister dst, Address src, bool nt) {
+     if (nt) {
+       Assembler::movntdqa(dst, src);
+     } else {
+       Assembler::movdqu(dst, src); // use unaligned load
+     }
+   }
+   void MacroAssembler::vmovdqa(XMMRegister dst, Address src, bool nt) {
+     if (nt) {
+       Assembler::vmovntdqa(dst, src);
+     } else {
+       Assembler::vmovdqu(dst, src); // use unaligned load
+     }
+   }
+   void MacroAssembler::evmovdqa(XMMRegister dst, Address src, int vector_len, bool nt) {
+     if (nt) {
+       Assembler::evmovntdqa(dst, src, vector_len);
+     } else {
+       Assembler::evmovdqul(dst, src, vector_len); // use unaligned load
+     }
+   }
+ #endif
+ 
  void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
    if (reachable(src)) {
      Assembler::movsd(dst, as_Address(src));
    } else {
      lea(rscratch1, src);

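Note: these _LP64-only overloads add a non-temporal ("nt") flavour to the aligned moves: with nt set they use the non-temporal store forms (movntq, movntdq, vmovntdq, evmovntdq), which write around the cache, and the streaming-load movntdqa variants; without it they deliberately fall back to the unaligned mov(/v/ev)dqu forms. A minimal sketch of a 16-byte streaming copy with the new helpers (illustrative only; the nt path requires 16-byte aligned addresses):

    movdqa(xmm0, Address(rsi, 0), /*nt*/ true);   // movntdqa load
    movdqa(Address(rdi, 0), xmm0, /*nt*/ true);   // movntdq store
    // A store fence (sfence) would normally follow a run of
    // non-temporal stores before the data is published.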
@@ -2790,10 +2903,53 @@
    // Make sure rsp stays 16-byte aligned
    LP64_ONLY(subq(rsp, 8));
    pusha();
  }
  
+ void MacroAssembler::push_cont_fastpath(Register java_thread) {
+   Label done;
+   cmpptr(rsp, Address(java_thread, JavaThread::cont_fastpath_offset()));
+   jccb(Assembler::belowEqual, done);
+   movptr(Address(java_thread, JavaThread::cont_fastpath_offset()), rsp);
+   bind(done);
+ }
+ 
+ void MacroAssembler::pop_cont_fastpath(Register java_thread) {
+   Label done;
+   cmpptr(rsp, Address(java_thread, JavaThread::cont_fastpath_offset()));
+   jccb(Assembler::below, done);
+   movptr(Address(java_thread, JavaThread::cont_fastpath_offset()), 0);
+   bind(done);
+ }
+ 
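Note: push_cont_fastpath raises the thread's cont_fastpath watermark to the current rsp when rsp is above the stored value, and pop_cont_fastpath clears the field once rsp is back at or above it; presumably this lets the continuation freeze code tell whether a frame that requires the slow path is still on the stack. A hedged sketch of how a call site might be bracketed (the entry point is an assumption, not part of this change):

    push_cont_fastpath(r15_thread);
    call(RuntimeAddress(some_entry));   // some_entry: assumed address
    pop_cont_fastpath(r15_thread);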
+ void MacroAssembler::inc_held_monitor_count(Register java_thread) {
+   incrementl(Address(java_thread, JavaThread::held_monitor_count_offset()));
+ }
+ 
+ void MacroAssembler::dec_held_monitor_count(Register java_thread) {
+   decrementl(Address(java_thread, JavaThread::held_monitor_count_offset()));
+ }
+ 
+ void MacroAssembler::reset_held_monitor_count(Register java_thread) {
+   movl(Address(java_thread, JavaThread::held_monitor_count_offset()), (int32_t)0);
+ }
+ 
+ #ifdef ASSERT
+ void MacroAssembler::stop_if_in_cont(Register cont, const char* name) {
+ #ifdef _LP64
+   Label no_cont;
+   movptr(cont, Address(r15_thread, JavaThread::cont_entry_offset()));
+   testptr(cont, cont);
+   jcc(Assembler::zero, no_cont);
+   stop(name);
+   bind(no_cont);
+ #else
+   Unimplemented();
+ #endif
+ }
+ #endif
+ 
  void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp) { // determine java_thread register
    if (!java_thread->is_valid()) {
      java_thread = rdi;
      get_thread(java_thread);
    }