src/hotspot/cpu/x86/macroAssembler_x86.cpp

*** 1348,11 ***
  #endif
    call(AddressLiteral(entry, rh));
  }
  
  int MacroAssembler::ic_check_size() {
!   return LP64_ONLY(14) NOT_LP64(12);
  }
  
  int MacroAssembler::ic_check(int end_alignment) {
    Register receiver = LP64_ONLY(j_rarg0) NOT_LP64(rcx);
    Register data = rax;
--- 1348,12 ---
  #endif
    call(AddressLiteral(entry, rh));
  }
  
  int MacroAssembler::ic_check_size() {
!   return
+       LP64_ONLY(UseCompactObjectHeaders ? 17 : 14) NOT_LP64(12);
  }
  
  int MacroAssembler::ic_check(int end_alignment) {
    Register receiver = LP64_ONLY(j_rarg0) NOT_LP64(rcx);
    Register data = rax;

*** 1364,21 ***
    // before the inline cache check here, and not after
    align(end_alignment, offset() + ic_check_size());
  
    int uep_offset = offset();
  
    if (UseCompressedClassPointers) {
      movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
      cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
    } else {
      movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
      cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset()));
    }
  
    // if inline cache check fails, then jump to runtime routine
    jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
!   assert((offset() % end_alignment) == 0, "Misaligned verified entry point");
  
    return uep_offset;
  }
  
  void MacroAssembler::emit_static_call_stub() {
--- 1365,27 ---
    // before the inline cache check here, and not after
    align(end_alignment, offset() + ic_check_size());
  
    int uep_offset = offset();
  
+ #ifdef _LP64
+   if (UseCompactObjectHeaders) {
+     load_nklass_compact(temp, receiver);
+     cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
+   } else
+ #endif
    if (UseCompressedClassPointers) {
      movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
      cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
    } else {
      movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
      cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset()));
    }
  
    // if inline cache check fails, then jump to runtime routine
    jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
!   assert((offset() % end_alignment) == 0, "Misaligned verified entry point (%d, %d, %d)", uep_offset, offset(), end_alignment);
  
    return uep_offset;
  }
  
  void MacroAssembler::emit_static_call_stub() {

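Note: with compact object headers the inline-cache check loads and shifts the 64-bit mark word instead of doing a single 32-bit load of the klass field, which costs three extra code bytes on LP64 (17 vs 14). ic_check_size() must report this exactly, because align() pads to the end alignment *before* the check is emitted. A minimal sketch of the size contract (a plain-C++ illustration, not HotSpot source):

    // Sketch: the bookkeeping ic_check() relies on. The 17-byte LP64 figure
    // covers the movq+shrq mark-word extraction plus cmpl and jcc, versus
    // movl + cmpl + jcc (14 bytes) with compressed class pointers.
    constexpr int ic_check_size(bool lp64, bool compact_headers) {
      return lp64 ? (compact_headers ? 17 : 14) : 12;
    }
    static_assert(ic_check_size(true, true) == 17, "compact-header UEP size");
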
*** 5667,23 ***
    movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
    movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
    movptr(holder, Address(holder, ConstantPool::pool_holder_offset()));          // InstanceKlass*
  }
  
  void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
    assert_different_registers(src, tmp);
    assert_different_registers(dst, tmp);
  #ifdef _LP64
!   if (UseCompressedClassPointers) {
      movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
      decode_klass_not_null(dst, tmp);
    } else
  #endif
      movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
  }
  
  void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
    assert_different_registers(src, tmp);
    assert_different_registers(dst, tmp);
  #ifdef _LP64
    if (UseCompressedClassPointers) {
      encode_klass_not_null(src, tmp);
--- 5674,38 ---
    movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
    movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
    movptr(holder, Address(holder, ConstantPool::pool_holder_offset()));          // InstanceKlass*
  }
  
+ #ifdef _LP64
+ void MacroAssembler::load_nklass_compact(Register dst, Register src) {
+   assert(UseCompactObjectHeaders, "expect compact object headers");
+   movq(dst, Address(src, oopDesc::mark_offset_in_bytes()));
+   shrq(dst, markWord::klass_shift);
+ }
+ #endif
+ 
  void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
+   BLOCK_COMMENT("load_klass");
    assert_different_registers(src, tmp);
    assert_different_registers(dst, tmp);
  #ifdef _LP64
!   if (UseCompactObjectHeaders) {
+     load_nklass_compact(dst, src);
+     decode_klass_not_null(dst, tmp);
+   } else if (UseCompressedClassPointers) {
      movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
      decode_klass_not_null(dst, tmp);
    } else
  #endif
+   {
      movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
+   }
  }
  
  void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
+   assert(!UseCompactObjectHeaders, "not with compact headers");
    assert_different_registers(src, tmp);
    assert_different_registers(dst, tmp);
  #ifdef _LP64
    if (UseCompressedClassPointers) {
      encode_klass_not_null(src, tmp);

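Note: load_nklass_compact is the primitive the other changes build on. Under compact headers the narrow Klass* lives in the upper bits of the mark word, so it is recovered with a 64-bit load and a right shift rather than a load of the (now absent) klass field. A C++ sketch of the same extraction, assuming markWord::klass_shift as used above:

    #include <cstdint>
    using narrowKlass = uint32_t;
    // Equivalent of the movq + shrq pair in load_nklass_compact: shift the
    // narrow class pointer down from the mark word's upper bits.
    inline narrowKlass nklass_from_mark(uint64_t mark, unsigned klass_shift) {
      return (narrowKlass)(mark >> klass_shift);
    }

load_klass then feeds the result through decode_klass_not_null exactly as the UseCompressedClassPointers branch does, so the decode path stays shared.
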
*** 5691,10 ***
--- 5713,45 ---
    } else
  #endif
      movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
  }
  
+ void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
+   BLOCK_COMMENT("cmp_klass 1");
+ #ifdef _LP64
+   if (UseCompactObjectHeaders) {
+     load_nklass_compact(tmp, obj);
+     cmpl(klass, tmp);
+   } else if (UseCompressedClassPointers) {
+     cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
+   } else
+ #endif
+   {
+     cmpptr(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
+   }
+ }
+ 
+ void MacroAssembler::cmp_klass(Register src, Register dst, Register tmp1, Register tmp2) {
+   BLOCK_COMMENT("cmp_klass 2");
+ #ifdef _LP64
+   if (UseCompactObjectHeaders) {
+     assert(tmp2 != noreg, "need tmp2");
+     assert_different_registers(src, dst, tmp1, tmp2);
+     load_nklass_compact(tmp1, src);
+     load_nklass_compact(tmp2, dst);
+     cmpl(tmp1, tmp2);
+   } else if (UseCompressedClassPointers) {
+     movl(tmp1, Address(src, oopDesc::klass_offset_in_bytes()));
+     cmpl(tmp1, Address(dst, oopDesc::klass_offset_in_bytes()));
+   } else
+ #endif
+   {
+     movptr(tmp1, Address(src, oopDesc::klass_offset_in_bytes()));
+     cmpptr(tmp1, Address(dst, oopDesc::klass_offset_in_bytes()));
+   }
+ }
+ 
  void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                                      Register tmp1, Register thread_tmp) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    decorators = AccessInternal::decorator_fixup(decorators, type);
    bool as_raw = (decorators & AS_RAW) != 0;

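Note: the two cmp_klass overloads cover the two call-site shapes: comparing an already-loaded (narrow) klass against an object's class, and comparing two objects' classes. With compact headers the narrow class cannot be compared against memory directly, so it is first materialized into tmp (or tmp1/tmp2, hence the new tmp2 requirement in the second overload). A self-contained sketch of the two-object case (plain C++, an illustration rather than HotSpot source):

    #include <cstdint>
    // Equal narrow class ids imply equal Klass*, since the encoding is
    // injective; no decode is needed just to compare.
    inline bool same_klass(uint64_t mark_a, uint64_t mark_b, unsigned klass_shift) {
      return (uint32_t)(mark_a >> klass_shift) == (uint32_t)(mark_b >> klass_shift);
    }
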
*** 5738,10 ***
--- 5795,11 ---
    access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
  }
  
  #ifdef _LP64
  void MacroAssembler::store_klass_gap(Register dst, Register src) {
+   assert(!UseCompactObjectHeaders, "Don't use with compact headers");
    if (UseCompressedClassPointers) {
      // Store to klass gap in destination
      movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
    }
  }

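Note: the new assert in store_klass_gap records a layout fact: the klass gap only exists when the narrow klass is stored as a separate 32-bit field; with compact headers the class bits live in the mark word and there is no gap to clear. An illustration (these structs are illustrative assumptions, not HotSpot's declarations):

    #include <cstdint>
    struct LegacyHeader  { uint64_t mark; uint32_t nklass; uint32_t gap; };    // 16 bytes
    struct CompactHeader { uint64_t mark; /* nklass in mark's upper bits */ }; // 8 bytes
    static_assert(sizeof(LegacyHeader) == 16, "mark + nklass + gap");
    static_assert(sizeof(CompactHeader) == 8, "mark word only");
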
*** 5902,12 ***
    if (CompressedKlassPointers::base() != nullptr) {
      mov64(tmp, (int64_t)CompressedKlassPointers::base());
      subq(r, tmp);
    }
    if (CompressedKlassPointers::shift() != 0) {
!     assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
-     shrq(r, LogKlassAlignmentInBytes);
    }
  }
  
  void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) {
    assert_different_registers(src, dst);
--- 5960,11 ---
    if (CompressedKlassPointers::base() != nullptr) {
      mov64(tmp, (int64_t)CompressedKlassPointers::base());
      subq(r, tmp);
    }
    if (CompressedKlassPointers::shift() != 0) {
!     shrq(r, CompressedKlassPointers::shift());
    }
  }
  
  void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) {
    assert_different_registers(src, dst);

*** 5916,12 ***
      addq(dst, src);
    } else {
      movptr(dst, src);
    }
    if (CompressedKlassPointers::shift() != 0) {
!     assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
-     shrq(dst, LogKlassAlignmentInBytes);
    }
  }
  
  void  MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
    assert_different_registers(r, tmp);
--- 5973,11 ---
      addq(dst, src);
    } else {
      movptr(dst, src);
    }
    if (CompressedKlassPointers::shift() != 0) {
!     shrq(dst, CompressedKlassPointers::shift());
    }
  }
  
  void  MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
    assert_different_registers(r, tmp);

*** 5929,12 ***
    assert(UseCompressedClassPointers, "should only be used for compressed headers");
    // Cannot assert, unverified entry point counts instructions (see .ad file)
    // vtableStubs also counts instructions in pd_code_size_limit.
    // Also do not verify_oop as this is called by verify_oop.
    if (CompressedKlassPointers::shift() != 0) {
!     assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
-     shlq(r, LogKlassAlignmentInBytes);
    }
    if (CompressedKlassPointers::base() != nullptr) {
      mov64(tmp, (int64_t)CompressedKlassPointers::base());
      addq(r, tmp);
    }
--- 5985,11 ---
    assert(UseCompressedClassPointers, "should only be used for compressed headers");
    // Cannot assert, unverified entry point counts instructions (see .ad file)
    // vtableStubs also counts instructions in pd_code_size_limit.
    // Also do not verify_oop as this is called by verify_oop.
    if (CompressedKlassPointers::shift() != 0) {
!     shlq(r, CompressedKlassPointers::shift());
    }
    if (CompressedKlassPointers::base() != nullptr) {
      mov64(tmp, (int64_t)CompressedKlassPointers::base());
      addq(r, tmp);
    }

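Note: the encode/decode routines previously hard-coded LogKlassAlignmentInBytes as the shift; compact headers need a larger shift, so the code now asks CompressedKlassPointers::shift() directly and the asserts tying the two together are dropped. The round trip being implemented, as a sketch:

    #include <cstdint>
    // nk = (k - base) >> shift;  k = (nk << shift) + base.  The round trip is
    // lossless as long as every Klass* is aligned to (1 << shift), which the
    // removed asserts used to pin at LogKlassAlignmentInBytes.
    inline uint32_t encode_klass(uint64_t k, uint64_t base, unsigned shift) {
      return (uint32_t)((k - base) >> shift);
    }
    inline uint64_t decode_klass(uint32_t nk, uint64_t base, unsigned shift) {
      return ((uint64_t)nk << shift) + base;
    }
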
*** 5952,21 ***
        CompressedKlassPointers::shift() == 0) {
      // The best case scenario is that there is no base or shift. Then it is already
      // a pointer that needs nothing but a register rename.
      movl(dst, src);
    } else {
!     if (CompressedKlassPointers::base() != nullptr) {
!       mov64(dst, (int64_t)CompressedKlassPointers::base());
!     } else {
!       xorq(dst, dst);
!     }
!     if (CompressedKlassPointers::shift() != 0) {
!       assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
!       assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
!       leaq(dst, Address(dst, src, Address::times_8, 0));
      } else {
        addq(dst, src);
      }
    }
  }
  
  void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
--- 6007,32 ---
        CompressedKlassPointers::shift() == 0) {
      // The best case scenario is that there is no base or shift. Then it is already
      // a pointer that needs nothing but a register rename.
      movl(dst, src);
    } else {
!     if (CompressedKlassPointers::shift() <= Address::times_8) {
!       if (CompressedKlassPointers::base() != nullptr) {
!         mov64(dst, (int64_t)CompressedKlassPointers::base());
!       } else {
!         xorq(dst, dst);
!       }
!       if (CompressedKlassPointers::shift() != 0) {
!         assert(CompressedKlassPointers::shift() == Address::times_8, "klass not aligned on 64bits?");
!         leaq(dst, Address(dst, src, Address::times_8, 0));
+       } else {
+         addq(dst, src);
+       }
      } else {
+       if (CompressedKlassPointers::base() != nullptr) {
+         const uint64_t base_right_shifted =
+             (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
+         mov64(dst, base_right_shifted);
+       } else {
+         xorq(dst, dst);
+       }
        addq(dst, src);
+       shlq(dst, CompressedKlassPointers::shift());
      }
    }
  }
  
  void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {

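Note: decode_and_move_klass_not_null now has two strategies. For shifts up to Address::times_8 (3), a single leaq folds base, scaled narrow value, and move into one instruction, as before. Larger shifts exceed leaq's scale factors, so the new branch computes ((base >> shift) + nk) << shift instead; that equals base + (nk << shift) only when base is aligned to (1 << shift), an alignment the encoding base is assumed to provide. Sketch of the large-shift path:

    #include <cassert>
    #include <cstdint>
    inline uint64_t decode_large_shift(uint32_t nk, uint64_t base, unsigned s) {
      // The transformation relies on base being (1 << s)-aligned, i.e. its
      // low s bits are zero, so pre-shifting it right loses nothing.
      assert((base & ((UINT64_C(1) << s) - 1)) == 0);
      return ((base >> s) + nk) << s;  // == base + ((uint64_t)nk << s)
    }
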
*** 10269,21 ***
  //
  // obj: the object to be locked
  // reg_rax: rax
  // thread: the thread which attempts to lock obj
  // tmp: a temporary register
! void MacroAssembler::lightweight_lock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
    assert(reg_rax == rax, "");
!   assert_different_registers(obj, reg_rax, thread, tmp);
  
    Label push;
    const Register top = tmp;
  
    // Preload the markWord. It is important that this is the first
    // instruction emitted as it is part of C1's null check semantics.
    movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
  
    // Load top.
    movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
  
    // Check if the lock-stack is full.
    cmpl(top, LockStack::end_offset());
--- 10335,26 ---
  //
  // obj: the object to be locked
  // reg_rax: rax
  // thread: the thread which attempts to lock obj
  // tmp: a temporary register
! void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
    assert(reg_rax == rax, "");
!   assert_different_registers(basic_lock, obj, reg_rax, thread, tmp);
  
    Label push;
    const Register top = tmp;
  
    // Preload the markWord. It is important that this is the first
    // instruction emitted as it is part of C1's null check semantics.
    movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
  
+   if (UseObjectMonitorTable) {
+     // Clear cache in case fast locking succeeds.
+     movptr(Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize(BasicLock::object_monitor_cache_offset_in_bytes())), 0);
+   }
+ 
    // Load top.
    movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
  
    // Check if the lock-stack is full.
    cmpl(top, LockStack::end_offset());

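Note: lightweight_lock gains a basic_lock parameter because, with UseObjectMonitorTable, the BasicLock carries a cached ObjectMonitor* that must be cleared before the fast path may succeed. A hedged model of the fast path (a simplified sketch, not HotSpot code; the 01/00 tags are the standard unlocked/fast-locked mark-word lock bits):

    #include <atomic>
    #include <cstdint>
    struct LockStackModel { uint32_t top = 0; uintptr_t elems[8]; };
    inline bool fast_lock(std::atomic<uintptr_t>& mark, uintptr_t obj,
                          LockStackModel& ls, uintptr_t* monitor_cache) {
      if (monitor_cache != nullptr) *monitor_cache = 0;  // UseObjectMonitorTable
      if (ls.top == sizeof ls.elems / sizeof *ls.elems) return false; // full -> slow
      uintptr_t unlocked = mark.load(std::memory_order_relaxed) | 0b01;
      uintptr_t locked   = unlocked & ~uintptr_t(0b11);  // clear tag -> fast-locked
      if (!mark.compare_exchange_strong(unlocked, locked)) return false; // slow path
      ls.elems[ls.top++] = obj;  // push obj onto this thread's lock-stack
      return true;
    }
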
*** 10318,17 ***
  //
  // obj: the object to be unlocked
  // reg_rax: rax
  // thread: the thread
  // tmp: a temporary register
- //
- // x86_32 Note: reg_rax and thread may alias each other due to limited register
- //              availiability.
  void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
    assert(reg_rax == rax, "");
!   assert_different_registers(obj, reg_rax, tmp);
-   LP64_ONLY(assert_different_registers(obj, reg_rax, thread, tmp);)
  
    Label unlocked, push_and_slow;
    const Register top = tmp;
  
    // Check if obj is top of lock-stack.
--- 10389,13 ---
  //
  // obj: the object to be unlocked
  // reg_rax: rax
  // thread: the thread
  // tmp: a temporary register
  void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
    assert(reg_rax == rax, "");
!   assert_different_registers(obj, reg_rax, thread, tmp);
  
    Label unlocked, push_and_slow;
    const Register top = tmp;
  
    // Check if obj is top of lock-stack.

*** 10364,14 ***
    lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
    jcc(Assembler::equal, unlocked);
  
    bind(push_and_slow);
    // Restore lock-stack and handle the unlock in runtime.
-   if (thread == reg_rax) {
-     // On x86_32 we may lose the thread.
-     get_thread(thread);
-   }
  #ifdef ASSERT
    movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
    movptr(Address(thread, top), obj);
  #endif
    addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
--- 10431,10 ---
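
Note: the removed x86_32 special cases (the thread/rax aliasing comment and the get_thread() reload in push_and_slow) reflect that this path no longer has to tolerate 32-bit register pressure, so the assert can require all four registers to be distinct unconditionally. A hedged companion model of the unlock fast path, mirroring push_and_slow's restore-then-slow behavior (simplified sketch, not HotSpot code):

    #include <atomic>
    #include <cstdint>
    struct LockStackModel { uint32_t top = 0; uintptr_t elems[8]; };
    inline bool fast_unlock(std::atomic<uintptr_t>& mark, uintptr_t obj,
                            LockStackModel& ls) {
      if (ls.top == 0 || ls.elems[ls.top - 1] != obj) return false; // not ours
      ls.top--;                                    // tentatively pop
      uintptr_t locked   = mark.load(std::memory_order_relaxed) & ~uintptr_t(0b11);
      uintptr_t unlocked = locked | 0b01;
      if (!mark.compare_exchange_strong(locked, unlocked)) {
        ls.top++;   // push_and_slow: restore the entry, handle unlock in runtime
        return false;
      }
      return true;
    }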