
src/hotspot/cpu/x86/macroAssembler_x86.cpp

@@ -1348,11 +1348,12 @@
  #endif
    call(AddressLiteral(entry, rh));
  }
  
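+ // Note: with compact object headers the klass is extracted from the mark word
+ // with a movq/shrq pair rather than a single movl, which accounts for the
+ // three extra bytes in the compact case below.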
  int MacroAssembler::ic_check_size() {
-   return LP64_ONLY(14) NOT_LP64(12);
+   return LP64_ONLY(UseCompactObjectHeaders ? 17 : 14) NOT_LP64(12);
  }
  
  int MacroAssembler::ic_check(int end_alignment) {
    Register receiver = LP64_ONLY(j_rarg0) NOT_LP64(rcx);
    Register data = rax;

@@ -1364,21 +1365,27 @@
    // before the inline cache check here, and not after
    align(end_alignment, offset() + ic_check_size());
  
    int uep_offset = offset();
  
+ #ifdef _LP64
+   if (UseCompactObjectHeaders) {
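+     // Compact headers: extract the narrow klass from the mark word and
+     // compare it against the speculated klass in the CompiledICData.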
+     load_nklass_compact(temp, receiver);
+     cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
+   } else
+ #endif
    if (UseCompressedClassPointers) {
      movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
      cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
    } else {
      movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
      cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset()));
    }
  
    // if inline cache check fails, then jump to runtime routine
    jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
-   assert((offset() % end_alignment) == 0, "Misaligned verified entry point");
+   assert((offset() % end_alignment) == 0, "Misaligned verified entry point (%d, %d, %d)", uep_offset, offset(), end_alignment);
  
    return uep_offset;
  }
  
  void MacroAssembler::emit_static_call_stub() {

@@ -5323,23 +5330,38 @@
    movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
    movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
    movptr(holder, Address(holder, ConstantPool::pool_holder_offset()));          // InstanceKlass*
  }
  
+ #ifdef _LP64
+ void MacroAssembler::load_nklass_compact(Register dst, Register src) {
+   assert(UseCompactObjectHeaders, "expect compact object headers");
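+   // The narrow Klass* is stored in the upper bits of the mark word:
+   // load the whole word and shift the lower, non-klass bits away.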
+   movq(dst, Address(src, oopDesc::mark_offset_in_bytes()));
+   shrq(dst, markWord::klass_shift);
+ }
+ #endif
+ 
  void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
+   BLOCK_COMMENT("load_klass");
    assert_different_registers(src, tmp);
    assert_different_registers(dst, tmp);
  #ifdef _LP64
-   if (UseCompressedClassPointers) {
+   if (UseCompactObjectHeaders) {
+     load_nklass_compact(dst, src);
+     decode_klass_not_null(dst, tmp);
+   } else if (UseCompressedClassPointers) {
      movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
      decode_klass_not_null(dst, tmp);
    } else
  #endif
+   {
      movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
+   }
  }
  
  void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
+   assert(!UseCompactObjectHeaders, "not with compact headers");
    assert_different_registers(src, tmp);
    assert_different_registers(dst, tmp);
  #ifdef _LP64
    if (UseCompressedClassPointers) {
      encode_klass_not_null(src, tmp);

@@ -5347,10 +5369,45 @@
    } else
  #endif
      movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
  }
  
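+ // Compare the klass in the given register against the klass of obj, setting
+ // the condition flags for a subsequent branch. tmp is only written on the
+ // compact-headers path, where the narrow klass is first extracted from the
+ // mark word.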
+ void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
+   BLOCK_COMMENT("cmp_klass 1");
+ #ifdef _LP64
+   if (UseCompactObjectHeaders) {
+     load_nklass_compact(tmp, obj);
+     cmpl(klass, tmp);
+   } else if (UseCompressedClassPointers) {
+     cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
+   } else
+ #endif
+   {
+     cmpptr(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
+   }
+ }
+ 
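+ // Compare the klasses of the two objects src and dst. tmp2 is only needed
+ // (and asserted non-noreg) with compact headers, where both narrow klasses
+ // must be extracted into registers before they can be compared.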
+ void MacroAssembler::cmp_klass(Register src, Register dst, Register tmp1, Register tmp2) {
+   BLOCK_COMMENT("cmp_klass 2");
+ #ifdef _LP64
+   if (UseCompactObjectHeaders) {
+     assert(tmp2 != noreg, "need tmp2");
+     assert_different_registers(src, dst, tmp1, tmp2);
+     load_nklass_compact(tmp1, src);
+     load_nklass_compact(tmp2, dst);
+     cmpl(tmp1, tmp2);
+   } else if (UseCompressedClassPointers) {
+     movl(tmp1, Address(src, oopDesc::klass_offset_in_bytes()));
+     cmpl(tmp1, Address(dst, oopDesc::klass_offset_in_bytes()));
+   } else
+ #endif
+   {
+     movptr(tmp1, Address(src, oopDesc::klass_offset_in_bytes()));
+     cmpptr(tmp1, Address(dst, oopDesc::klass_offset_in_bytes()));
+   }
+ }
+ 
  void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                                      Register tmp1, Register thread_tmp) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    decorators = AccessInternal::decorator_fixup(decorators, type);
    bool as_raw = (decorators & AS_RAW) != 0;

@@ -5394,10 +5451,11 @@
    access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
  }
  
  #ifdef _LP64
  void MacroAssembler::store_klass_gap(Register dst, Register src) {
+   assert(!UseCompactObjectHeaders, "Don't use with compact headers");
    if (UseCompressedClassPointers) {
      // Store to klass gap in destination
      movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
    }
  }

@@ -5558,12 +5616,11 @@
    if (CompressedKlassPointers::base() != nullptr) {
      mov64(tmp, (int64_t)CompressedKlassPointers::base());
      subq(r, tmp);
    }
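+   // The encoding shift is no longer hard-wired to LogKlassAlignmentInBytes
+   // (compact headers may use a larger shift), so use the runtime value.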
    if (CompressedKlassPointers::shift() != 0) {
-     assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
-     shrq(r, LogKlassAlignmentInBytes);
+     shrq(r, CompressedKlassPointers::shift());
    }
  }
  
  void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) {
    assert_different_registers(src, dst);

@@ -5572,12 +5629,11 @@
      addq(dst, src);
    } else {
      movptr(dst, src);
    }
    if (CompressedKlassPointers::shift() != 0) {
-     assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
-     shrq(dst, LogKlassAlignmentInBytes);
+     shrq(dst, CompressedKlassPointers::shift());
    }
  }
  
  void  MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
    assert_different_registers(r, tmp);

@@ -5585,12 +5641,11 @@
    assert(UseCompressedClassPointers, "should only be used for compressed headers");
    // Cannot assert, unverified entry point counts instructions (see .ad file)
    // vtableStubs also counts instructions in pd_code_size_limit.
    // Also do not verify_oop as this is called by verify_oop.
    if (CompressedKlassPointers::shift() != 0) {
-     assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
-     shlq(r, LogKlassAlignmentInBytes);
+     shlq(r, CompressedKlassPointers::shift());
    }
    if (CompressedKlassPointers::base() != nullptr) {
      mov64(tmp, (int64_t)CompressedKlassPointers::base());
      addq(r, tmp);
    }

@@ -5608,21 +5663,32 @@
        CompressedKlassPointers::shift() == 0) {
      // The best case scenario is that there is no base or shift. Then it is already
      // a pointer that needs nothing but a register rename.
      movl(dst, src);
    } else {
-     if (CompressedKlassPointers::base() != nullptr) {
-       mov64(dst, (int64_t)CompressedKlassPointers::base());
-     } else {
-       xorq(dst, dst);
-     }
-     if (CompressedKlassPointers::shift() != 0) {
-       assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
-       assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
-       leaq(dst, Address(dst, src, Address::times_8, 0));
+     if (CompressedKlassPointers::shift() <= Address::times_8) {
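+       // A shift of at most 3 can be folded into lea's scale factor
+       // (a zero shift degenerates into a plain add).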
+       if (CompressedKlassPointers::base() != nullptr) {
+         mov64(dst, (int64_t)CompressedKlassPointers::base());
+       } else {
+         xorq(dst, dst);
+       }
+       if (CompressedKlassPointers::shift() != 0) {
+         assert(CompressedKlassPointers::shift() == Address::times_8, "klass not aligned on 64bits?");
+         leaq(dst, Address(dst, src, Address::times_8, 0));
+       } else {
+         addq(dst, src);
+       }
      } else {
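+       // The shift exceeds lea's maximum scale factor. Add the pre-shifted
+       // base to the narrow klass and shift the sum instead:
+       //   ((base >> shift) + nklass) << shift == base + (nklass << shift),
+       // which assumes the encoding base is aligned to the shift.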
+       if (CompressedKlassPointers::base() != nullptr) {
+         const uint64_t base_right_shifted =
+             (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
+         mov64(dst, base_right_shifted);
+       } else {
+         xorq(dst, dst);
+       }
        addq(dst, src);
+       shlq(dst, CompressedKlassPointers::shift());
      }
    }
  }
  
  void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {

@@ -9925,21 +9991,31 @@
  //
  // obj: the object to be locked
  // reg_rax: rax
  // thread: the thread which attempts to lock obj
  // tmp: a temporary register
- void MacroAssembler::lightweight_lock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
+ //
+ // x86_32 Note: basic_lock and thread may alias each other due to limited register
+ //              availability.
+ void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
    assert(reg_rax == rax, "");
    assert_different_registers(obj, reg_rax, thread, tmp);
  
    Label push;
    const Register top = tmp;
  
    // Preload the markWord. It is important that this is the first
    // instruction emitted as it is part of C1's null check semantics.
    movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
  
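+   // Zero the displaced-header slot of the BasicLock; it is not used by
+   // lightweight locking.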
+   movptr(Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize(BasicLock::displaced_header_offset_in_bytes())), 0);
+ 
+ #ifndef _LP64
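+   // The store above consumed basic_lock; if it shares a register with
+   // thread, reload the thread pointer now.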
+   if (thread == basic_lock) {
+     get_thread(thread);
+   }
+ #endif // !_LP64
    // Load top.
    movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
  
    // Check if the lock-stack is full.
    cmpl(top, LockStack::end_offset());