< prev index next >

src/hotspot/cpu/x86/macroAssembler_x86.cpp

Print this page

  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "jvm.h"
  27 #include "asm/assembler.hpp"
  28 #include "asm/assembler.inline.hpp"
  29 #include "compiler/compiler_globals.hpp"
  30 #include "compiler/disassembler.hpp"
  31 #include "gc/shared/barrierSet.hpp"
  32 #include "gc/shared/barrierSetAssembler.hpp"
  33 #include "gc/shared/collectedHeap.inline.hpp"
  34 #include "gc/shared/tlab_globals.hpp"
  35 #include "interpreter/bytecodeHistogram.hpp"
  36 #include "interpreter/interpreter.hpp"

  37 #include "memory/resourceArea.hpp"
  38 #include "memory/universe.hpp"
  39 #include "oops/accessDecorators.hpp"

  40 #include "oops/compressedOops.inline.hpp"
  41 #include "oops/klass.inline.hpp"
  42 #include "prims/methodHandles.hpp"
  43 #include "runtime/flags/flagSetting.hpp"
  44 #include "runtime/interfaceSupport.inline.hpp"
  45 #include "runtime/jniHandles.hpp"
  46 #include "runtime/objectMonitor.hpp"
  47 #include "runtime/os.hpp"
  48 #include "runtime/safepoint.hpp"
  49 #include "runtime/safepointMechanism.hpp"
  50 #include "runtime/sharedRuntime.hpp"
  51 #include "runtime/stubRoutines.hpp"
  52 #include "runtime/thread.hpp"

  53 #include "utilities/macros.hpp"
  54 #include "crc32c.h"
  55 
  56 #ifdef PRODUCT
  57 #define BLOCK_COMMENT(str) /* nothing */
  58 #define STOP(error) stop(error)
  59 #else
  60 #define BLOCK_COMMENT(str) block_comment(str)
  61 #define STOP(error) block_comment(error); stop(error)
  62 #endif
  63 
  64 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
  65 
  66 #ifdef ASSERT
  67 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
  68 #endif
  69 
  70 static Assembler::Condition reverse[] = {
  71     Assembler::noOverflow     /* overflow      = 0x0 */ ,
  72     Assembler::overflow       /* noOverflow    = 0x1 */ ,

3759   }
3760 
3761   if (offset == -1) {
3762     addptr(rsp, aligned_size);
3763   }
3764 }
3765 
3766 // Defines obj, preserves var_size_in_bytes
3767 void MacroAssembler::eden_allocate(Register thread, Register obj,
3768                                    Register var_size_in_bytes,
3769                                    int con_size_in_bytes,
3770                                    Register t1,
3771                                    Label& slow_case) {
3772   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
3773   bs->eden_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
3774 }
3775 
3776 // Preserves the contents of address, destroys the contents length_in_bytes and temp.
3777 void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) {
3778   assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different");
3779   assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord");
3780   Label done;
3781 
3782   testptr(length_in_bytes, length_in_bytes);
3783   jcc(Assembler::zero, done);
3784 













3785   // initialize topmost word, divide index by 2, check if odd and test if zero
3786   // note: for the remaining code to work, index must be a multiple of BytesPerWord
3787 #ifdef ASSERT
3788   {
3789     Label L;
3790     testptr(length_in_bytes, BytesPerWord - 1);
3791     jcc(Assembler::zero, L);
3792     stop("length must be a multiple of BytesPerWord");
3793     bind(L);
3794   }
3795 #endif
3796   Register index = length_in_bytes;
3797   xorptr(temp, temp);    // use _zero reg to clear memory (shorter code)
3798   if (UseIncDec) {
3799     shrptr(index, 3);  // divide by 8/16 and set carry flag if bit 2 was set
3800   } else {
3801     shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
3802     shrptr(index, 1);
3803   }
3804 #ifndef _LP64
3805   // index could have not been a multiple of 8 (i.e., bit 2 was set)
3806   {
3807     Label even;
3808     // note: if index was a multiple of 8, then it cannot
3809     //       be 0 now otherwise it must have been 0 before
3810     //       => if it is even, we don't need to check for 0 again
3811     jcc(Assembler::carryClear, even);
3812     // clear topmost word (no jump would be needed if conditional assignment worked here)
3813     movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp);
3814     // index could be 0 now, must check again
3815     jcc(Assembler::zero, done);
3816     bind(even);
3817   }

4711 
4712 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
4713   // get mirror
4714   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
4715   load_method_holder(mirror, method);
4716   movptr(mirror, Address(mirror, mirror_offset));
4717   resolve_oop_handle(mirror, tmp);
4718 }
4719 
// Loads the ClassLoaderData of the klass holding rmethod into rresult.
void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
  load_method_holder(rresult, rmethod);
  movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
}
4724 
// Loads the InstanceKlass* that declares 'method' into 'holder':
// holder = method->constMethod()->constants()->pool_holder().
void MacroAssembler::load_method_holder(Register holder, Register method) {
  movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
  movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
  movptr(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes())); // InstanceKlass*
}
4730 
// Loads the Klass* of the object in src into dst. With compressed class
// pointers (LP64 only) the 32-bit narrow klass is loaded and decoded;
// otherwise the full-width Klass* is loaded directly.
void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
  assert_different_registers(src, tmp);
  assert_different_registers(dst, tmp);
#ifdef _LP64
  if (UseCompressedClassPointers) {
    // Load narrow klass (32 bits) and decode into a full Klass*.
    movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    decode_klass_not_null(dst, tmp);
  } else
#endif
    movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
}

4742 
// Stores the Klass* in src into the klass field of the object in dst.
// With compressed class pointers (LP64 only) src is first encoded to its
// 32-bit narrow form; note that this clobbers src in that case.
void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
  assert_different_registers(src, tmp);
  assert_different_registers(dst, tmp);
#ifdef _LP64
  if (UseCompressedClassPointers) {
    encode_klass_not_null(src, tmp);
    movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
  } else
#endif
    movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
}
4754 






4755 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
4756                                     Register tmp1, Register thread_tmp) {
4757   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
4758   decorators = AccessInternal::decorator_fixup(decorators);
4759   bool as_raw = (decorators & AS_RAW) != 0;
4760   if (as_raw) {
4761     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4762   } else {
4763     bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4764   }
4765 }
4766 
4767 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
4768                                      Register tmp1, Register tmp2, Register tmp3) {
4769   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
4770   decorators = AccessInternal::decorator_fixup(decorators);
4771   bool as_raw = (decorators & AS_RAW) != 0;
4772   if (as_raw) {
4773     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, src, tmp1, tmp2, tmp3);
4774   } else {

4781   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
4782 }
4783 
// Doesn't do verification, generates fixed size code
// Loads a heap oop that is known to be non-null (IS_NOT_NULL decorator).
void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
                                            Register thread_tmp, DecoratorSet decorators) {
  access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
}
4789 
// Stores a heap oop through the Access API, applying any GC store barriers
// implied by the (IN_HEAP) decorators.
void MacroAssembler::store_heap_oop(Address dst, Register src, Register tmp1,
                                    Register tmp2, Register tmp3, DecoratorSet decorators) {
  access_store_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2, tmp3);
}
4794 
// Used for storing NULLs.
// noreg as the source signals a null store; no temps are needed.
void MacroAssembler::store_heap_oop_null(Address dst) {
  access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
}
4799 
4800 #ifdef _LP64
4801 void MacroAssembler::store_klass_gap(Register dst, Register src) {
4802   if (UseCompressedClassPointers) {
4803     // Store to klass gap in destination
4804     movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
4805   }
4806 }
4807 
4808 #ifdef ASSERT
4809 void MacroAssembler::verify_heapbase(const char* msg) {
4810   assert (UseCompressedOops, "should be compressed");
4811   assert (Universe::heap() != NULL, "java heap should be initialized");
4812   if (CheckCompressedOops) {
4813     Label ok;
4814     const auto src2 = ExternalAddress((address)CompressedOops::ptrs_base_addr());
4815     assert(!src2.is_lval(), "should not be lval");
4816     const bool is_src2_reachable = reachable(src2);
4817     if (!is_src2_reachable) {
4818       push(rscratch1);  // cmpptr trashes rscratch1
4819     }
4820     cmpptr(r12_heapbase, src2);
4821     jcc(Assembler::equal, ok);
4822     STOP(msg);
4823     bind(ok);
4824     if (!is_src2_reachable) {
4825       pop(rscratch1);
4826     }
4827   }

4940     assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
4941     if (LogMinObjAlignmentInBytes == Address::times_8) {
4942       leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
4943     } else {
4944       if (dst != src) {
4945         movq(dst, src);
4946       }
4947       shlq(dst, LogMinObjAlignmentInBytes);
4948       if (CompressedOops::base() != NULL) {
4949         addq(dst, r12_heapbase);
4950       }
4951     }
4952   } else {
4953     assert (CompressedOops::base() == NULL, "sanity");
4954     if (dst != src) {
4955       movq(dst, src);
4956     }
4957   }
4958 }
4959 






























































// Encodes the Klass* in r into its narrow form in place:
// r = (r - base) >> shift. Clobbers tmp when a base is configured.
void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
  assert_different_registers(r, tmp);
  if (CompressedKlassPointers::base() != NULL) {
    // Subtract the encoding base first; must happen before the shift.
    mov64(tmp, (int64_t)CompressedKlassPointers::base());
    subq(r, tmp);
  }
  if (CompressedKlassPointers::shift() != 0) {
    assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
    shrq(r, LogKlassAlignmentInBytes);
  }
}
4971 
// Encodes the Klass* in src into dst (src is preserved):
// dst = (src - base) >> shift.
void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) {
  assert_different_registers(src, dst);
  if (CompressedKlassPointers::base() != NULL) {
    // Materialize -base, then add src: dst = src - base without touching src.
    mov64(dst, -(int64_t)CompressedKlassPointers::base());
    addq(dst, src);
  } else {
    movptr(dst, src);
  }
  if (CompressedKlassPointers::shift() != 0) {
    assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
    shrq(dst, LogKlassAlignmentInBytes);
  }
}
4985 
// Decodes the narrow klass in r into a full Klass* in place:
// r = (r << shift) + base. Clobbers tmp when a base is configured.
void  MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
  assert_different_registers(r, tmp);
  // Note: it will change flags
  assert(UseCompressedClassPointers, "should only be used for compressed headers");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  if (CompressedKlassPointers::shift() != 0) {
    assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
    // Shift first, then add the base; order is the inverse of encoding.
    shlq(r, LogKlassAlignmentInBytes);
  }
  if (CompressedKlassPointers::base() != NULL) {
    mov64(tmp, (int64_t)CompressedKlassPointers::base());
    addq(r, tmp);
  }
}
5002 
// Decodes the narrow klass in src into a full Klass* in dst (src preserved):
// dst = base + (src << shift).
void  MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) {
  assert_different_registers(src, dst);
  // Note: it will change flags
  assert (UseCompressedClassPointers, "should only be used for compressed headers");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.

  if (CompressedKlassPointers::base() == NULL &&
      CompressedKlassPointers::shift() == 0) {
    // The best case scenario is that there is no base or shift. Then it is already
    // a pointer that needs nothing but a register rename.
    // movl zero-extends the 32-bit narrow value into the 64-bit dst.
    movl(dst, src);
  } else {
    if (CompressedKlassPointers::base() != NULL) {
      mov64(dst, (int64_t)CompressedKlassPointers::base());
    } else {
      // No base: clear dst so the lea/add below sees a zero base.
      xorq(dst, dst);
    }
    if (CompressedKlassPointers::shift() != 0) {
      assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
      assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
      // Single lea computes dst = dst + (src << 3).
      leaq(dst, Address(dst, src, Address::times_8, 0));
    } else {
      addq(dst, src);
    }
  }
}
5031 
// Materializes the narrow oop for obj as an immediate in dst, recording an
// oop relocation for the embedded constant.
void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  mov_narrow_oop(dst, oop_index, rspec);
}
5040 
// Stores the narrow oop for obj to memory at dst as an immediate, recording
// an oop relocation for the embedded constant.
void  MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  mov_narrow_oop(dst, oop_index, rspec);
}

  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "jvm.h"
  27 #include "asm/assembler.hpp"
  28 #include "asm/assembler.inline.hpp"
  29 #include "compiler/compiler_globals.hpp"
  30 #include "compiler/disassembler.hpp"
  31 #include "gc/shared/barrierSet.hpp"
  32 #include "gc/shared/barrierSetAssembler.hpp"
  33 #include "gc/shared/collectedHeap.inline.hpp"
  34 #include "gc/shared/tlab_globals.hpp"
  35 #include "interpreter/bytecodeHistogram.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "logging/log.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "memory/universe.hpp"
  40 #include "oops/accessDecorators.hpp"
  41 #include "oops/compressedKlass.inline.hpp"
  42 #include "oops/compressedOops.inline.hpp"
  43 #include "oops/klass.inline.hpp"
  44 #include "prims/methodHandles.hpp"
  45 #include "runtime/flags/flagSetting.hpp"
  46 #include "runtime/interfaceSupport.inline.hpp"
  47 #include "runtime/jniHandles.hpp"
  48 #include "runtime/objectMonitor.hpp"
  49 #include "runtime/os.hpp"
  50 #include "runtime/safepoint.hpp"
  51 #include "runtime/safepointMechanism.hpp"
  52 #include "runtime/sharedRuntime.hpp"
  53 #include "runtime/stubRoutines.hpp"
  54 #include "runtime/thread.hpp"
  55 #include "utilities/align.hpp"
  56 #include "utilities/macros.hpp"
  57 #include "crc32c.h"
  58 
  59 #ifdef PRODUCT
  60 #define BLOCK_COMMENT(str) /* nothing */
  61 #define STOP(error) stop(error)
  62 #else
  63 #define BLOCK_COMMENT(str) block_comment(str)
  64 #define STOP(error) block_comment(error); stop(error)
  65 #endif
  66 
  67 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
  68 
  69 #ifdef ASSERT
  70 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
  71 #endif
  72 
  73 static Assembler::Condition reverse[] = {
  74     Assembler::noOverflow     /* overflow      = 0x0 */ ,
  75     Assembler::overflow       /* noOverflow    = 0x1 */ ,

3762   }
3763 
3764   if (offset == -1) {
3765     addptr(rsp, aligned_size);
3766   }
3767 }
3768 
// Defines obj, preserves var_size_in_bytes
// Delegates eden allocation to the GC-specific barrier-set assembler.
void MacroAssembler::eden_allocate(Register thread, Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Label& slow_case) {
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->eden_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
}
3778 
3779 // Preserves the contents of address, destroys the contents length_in_bytes and temp.
3780 void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) {
3781   assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different");
3782   assert((offset_in_bytes & (BytesPerInt - 1)) == 0, "offset must be a multiple of BytesPerInt");
3783   Label done;
3784 
3785   testptr(length_in_bytes, length_in_bytes);
3786   jcc(Assembler::zero, done);
3787 
3788   // Emit single 32bit store to clear leading bytes, if necessary.
3789   xorptr(temp, temp);    // use _zero reg to clear memory (shorter code)
3790 #ifdef _LP64
3791   if (!is_aligned(offset_in_bytes, BytesPerWord)) {
3792     movl(Address(address, offset_in_bytes), temp);
3793     offset_in_bytes += BytesPerInt;
3794     decrement(length_in_bytes, BytesPerInt);
3795   }
3796   assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord");
3797   testptr(length_in_bytes, length_in_bytes);
3798   jcc(Assembler::zero, done);
3799 #endif
3800 
3801   // initialize topmost word, divide index by 2, check if odd and test if zero
3802   // note: for the remaining code to work, index must be a multiple of BytesPerWord
3803 #ifdef ASSERT
3804   {
3805     Label L;
3806     testptr(length_in_bytes, BytesPerWord - 1);
3807     jcc(Assembler::zero, L);
3808     stop("length must be a multiple of BytesPerWord");
3809     bind(L);
3810   }
3811 #endif
3812   Register index = length_in_bytes;

3813   if (UseIncDec) {
3814     shrptr(index, 3);  // divide by 8/16 and set carry flag if bit 2 was set
3815   } else {
3816     shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
3817     shrptr(index, 1);
3818   }
3819 #ifndef _LP64
3820   // index could have not been a multiple of 8 (i.e., bit 2 was set)
3821   {
3822     Label even;
3823     // note: if index was a multiple of 8, then it cannot
3824     //       be 0 now otherwise it must have been 0 before
3825     //       => if it is even, we don't need to check for 0 again
3826     jcc(Assembler::carryClear, even);
3827     // clear topmost word (no jump would be needed if conditional assignment worked here)
3828     movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp);
3829     // index could be 0 now, must check again
3830     jcc(Assembler::zero, done);
3831     bind(even);
3832   }

4726 
// Loads the java mirror of the method's holder klass into 'mirror' and
// resolves the OopHandle indirection.
void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
  // get mirror
  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  load_method_holder(mirror, method);
  movptr(mirror, Address(mirror, mirror_offset));
  resolve_oop_handle(mirror, tmp);
}
4734 
// Loads the ClassLoaderData of the klass holding rmethod into rresult.
void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
  load_method_holder(rresult, rmethod);
  movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
}
4739 
// Loads the InstanceKlass* that declares 'method' into 'holder':
// holder = method->constMethod()->constants()->pool_holder().
void MacroAssembler::load_method_holder(Register holder, Register method) {
  movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
  movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
  movptr(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes())); // InstanceKlass*
}
4745 



#ifdef _LP64
// Loads the narrow Klass* of the object in src into dst by extracting it
// from the object's mark word (shifted down by markWord::klass_shift).
// If the mark word's lock bits show it is not a plain unlocked header,
// falls back to the load_nklass stub (oop in rax, narrow klass out in rax).
void MacroAssembler::load_nklass(Register dst, Register src) {
  assert_different_registers(src, dst);
  assert(UseCompressedClassPointers, "expect compressed class pointers");

  Label slow, done;
  movq(dst, Address(src, oopDesc::mark_offset_in_bytes()));
  // NOTE: While it would seem nice to use xorb instead (for which we don't have an encoding in our assembler),
  // the encoding for xorq uses the signed version (0x81/6) of xor, which encodes as compact as xorb would,
  // and doesn't make a difference performance-wise.
  xorq(dst, markWord::unlocked_value);
  testb(dst, markWord::lock_mask_in_place);
  jccb(Assembler::notZero, slow);

  // Fast path: unlocked mark word; the narrow klass is in the upper bits.
  shrq(dst, markWord::klass_shift);
  jmp(done);
  bind(slow);

  // Slow path: call the stub, preserving rax if it is not the result register.
  if (dst != rax) {
    push(rax);
  }
  if (src != rax) {
    mov(rax, src);
  }
  call(RuntimeAddress(StubRoutines::load_nklass()));
  if (dst != rax) {
    mov(dst, rax);
    pop(rax);
  }

  bind(done);
}
#endif
4779 
// Loads the Klass* of the object in src into dst, optionally null-checking
// src first. On LP64 the narrow klass is fetched from the mark word via
// load_nklass() and then decoded; on 32-bit the Klass* is loaded directly.
void MacroAssembler::load_klass(Register dst, Register src, Register tmp, bool null_check_src) {
  assert_different_registers(src, tmp);
  assert_different_registers(dst, tmp);
#ifdef _LP64
  assert(UseCompressedClassPointers, "expect compressed class pointers");
  // load_nklass() requires distinct registers; route through tmp if src == dst.
  Register d = dst;
  if (src == dst) {
    d = tmp;
  }
  if (null_check_src) {
    // Faulting access to the mark word serves as the implicit null check.
    null_check(src, oopDesc::mark_offset_in_bytes());
  }
  load_nklass(d, src);
  if (src == dst) {
    mov(dst, d);
  }
  decode_klass_not_null(dst, tmp);
#else
  if (null_check_src) {
    null_check(src, oopDesc::klass_offset_in_bytes());
  }
  movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
#endif
}
4804 
#ifndef _LP64
// 32-bit only: stores the full-width Klass* into the object's klass field.
// (On LP64 the klass now lives in the mark word and is not stored here.)
void MacroAssembler::store_klass(Register dst, Register src) {
  movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
}
#endif
4810 
// Access-API entry point for loads: normalizes the decorators and dispatches
// to the barrier-set assembler. AS_RAW bypasses GC-specific barriers by
// calling the base implementation statically.
void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                                    Register tmp1, Register thread_tmp) {
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  decorators = AccessInternal::decorator_fixup(decorators);
  bool as_raw = (decorators & AS_RAW) != 0;
  if (as_raw) {
    bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
  } else {
    bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
  }
}
4822 
4823 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
4824                                      Register tmp1, Register tmp2, Register tmp3) {
4825   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
4826   decorators = AccessInternal::decorator_fixup(decorators);
4827   bool as_raw = (decorators & AS_RAW) != 0;
4828   if (as_raw) {
4829     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, src, tmp1, tmp2, tmp3);
4830   } else {

4837   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
4838 }
4839 
// Doesn't do verification, generates fixed size code
// Loads a heap oop that is known to be non-null (IS_NOT_NULL decorator).
void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
                                            Register thread_tmp, DecoratorSet decorators) {
  access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
}
4845 
// Stores a heap oop through the Access API, applying any GC store barriers
// implied by the (IN_HEAP) decorators.
void MacroAssembler::store_heap_oop(Address dst, Register src, Register tmp1,
                                    Register tmp2, Register tmp3, DecoratorSet decorators) {
  access_store_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2, tmp3);
}
4850 
// Used for storing NULLs.
// noreg as the source signals a null store; no temps are needed.
void MacroAssembler::store_heap_oop_null(Address dst) {
  access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
}
4855 
4856 #ifdef _LP64







4857 #ifdef ASSERT
4858 void MacroAssembler::verify_heapbase(const char* msg) {
4859   assert (UseCompressedOops, "should be compressed");
4860   assert (Universe::heap() != NULL, "java heap should be initialized");
4861   if (CheckCompressedOops) {
4862     Label ok;
4863     const auto src2 = ExternalAddress((address)CompressedOops::ptrs_base_addr());
4864     assert(!src2.is_lval(), "should not be lval");
4865     const bool is_src2_reachable = reachable(src2);
4866     if (!is_src2_reachable) {
4867       push(rscratch1);  // cmpptr trashes rscratch1
4868     }
4869     cmpptr(r12_heapbase, src2);
4870     jcc(Assembler::equal, ok);
4871     STOP(msg);
4872     bind(ok);
4873     if (!is_src2_reachable) {
4874       pop(rscratch1);
4875     }
4876   }

4989     assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
4990     if (LogMinObjAlignmentInBytes == Address::times_8) {
4991       leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
4992     } else {
4993       if (dst != src) {
4994         movq(dst, src);
4995       }
4996       shlq(dst, LogMinObjAlignmentInBytes);
4997       if (CompressedOops::base() != NULL) {
4998         addq(dst, r12_heapbase);
4999       }
5000     }
5001   } else {
5002     assert (CompressedOops::base() == NULL, "sanity");
5003     if (dst != src) {
5004       movq(dst, src);
5005     }
5006   }
5007 }
5008 
// Cached narrow-Klass decode mode; lazily initialized by klass_decode_mode().
MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode = KlassDecodeNone;
5010 
5011 // Returns a static string
5012 const char* MacroAssembler::describe_klass_decode_mode(MacroAssembler::KlassDecodeMode mode) {
5013   switch (mode) {
5014   case KlassDecodeNone: return "none";
5015   case KlassDecodeZero: return "zero";
5016   case KlassDecodeXor:  return "xor";
5017   case KlassDecodeAdd:  return "add";
5018   default:
5019     ShouldNotReachHere();
5020   }
5021   return NULL;
5022 }
5023 
5024 // Return the current narrow Klass pointer decode mode.
5025 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
5026   if (_klass_decode_mode == KlassDecodeNone) {
5027     // First time initialization
5028     assert(UseCompressedClassPointers, "not using compressed class pointers");
5029     assert(Metaspace::initialized(), "metaspace not initialized yet");
5030 
5031     _klass_decode_mode = klass_decode_mode_for_base(CompressedKlassPointers::base());
5032     guarantee(_klass_decode_mode != KlassDecodeNone,
5033               PTR_FORMAT " is not a valid encoding base on aarch64",
5034               p2i(CompressedKlassPointers::base()));
5035     log_info(metaspace)("klass decode mode initialized: %s", describe_klass_decode_mode(_klass_decode_mode));
5036   }
5037   return _klass_decode_mode;
5038 }
5039 
// Given an arbitrary base address, return the KlassDecodeMode that would be used. Return KlassDecodeNone
// if base address is not valid for encoding.
// Selection order: zero base -> KlassDecodeZero; base aligned to
// KlassEncodingMetaspaceMax (no low bits overlap the shifted narrow value)
// -> KlassDecodeXor; base shiftable without loss -> KlassDecodeAdd.
MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode_for_base(address base) {
  assert(CompressedKlassPointers::shift() != 0, "not lilliput?");

  const uint64_t base_u64 = (uint64_t) base;

  if (base_u64 == 0) {
    return KlassDecodeZero;
  }

  if ((base_u64 & (KlassEncodingMetaspaceMax - 1)) == 0) {
    return KlassDecodeXor;
  }

  // Note that there is no point in optimizing for shift=3 since lilliput
  // will use larger shifts

  // The add+shift mode for decode_and_move_klass_not_null() requires the base to be
  //  shiftable-without-loss. So, this is the minimum restriction on x64 for a valid
  //  encoding base. This does not matter in reality since the shift values we use for
  //  Lilliput, while large, won't be larger than a page size. And the encoding base
  //  will be quite likely page aligned since it usually falls to the beginning of
  //  either CDS or CCS.
  if ((base_u64 & (KlassAlignmentInBytes - 1)) == 0) {
    return KlassDecodeAdd;
  }

  return KlassDecodeNone;
}
5070 
// Encodes the Klass* in r into its narrow form in place, using the decode
// mode selected for the current encoding base. Clobbers tmp in the xor/add
// modes.
void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
  assert_different_registers(r, tmp);
  switch (klass_decode_mode()) {
  case KlassDecodeZero: {
    // No base: narrow = ptr >> shift.
    shrq(r, CompressedKlassPointers::shift());
    break;
  }
  case KlassDecodeXor: {
    // Base has no bits in common with the shifted value: xor removes it.
    mov64(tmp, (int64_t)CompressedKlassPointers::base());
    xorq(r, tmp);
    shrq(r, CompressedKlassPointers::shift());
    break;
  }
  case KlassDecodeAdd: {
    // General case: narrow = (ptr - base) >> shift.
    mov64(tmp, (int64_t)CompressedKlassPointers::base());
    subq(r, tmp);
    shrq(r, CompressedKlassPointers::shift());
    break;
  }
  default:
    ShouldNotReachHere();
  }
}
5094 
// Encodes the Klass* in src into dst (src is preserved), using the decode
// mode selected for the current encoding base.
void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) {
  assert_different_registers(src, dst);
  switch (klass_decode_mode()) {
  case KlassDecodeZero: {
    // No base: dst = src >> shift.
    movptr(dst, src);
    shrq(dst, CompressedKlassPointers::shift());
    break;
  }
  case KlassDecodeXor: {
    // Base has no bits in common with the value: xor strips it.
    mov64(dst, (int64_t)CompressedKlassPointers::base());
    xorq(dst, src);
    shrq(dst, CompressedKlassPointers::shift());
    break;
  }
  case KlassDecodeAdd: {
    // dst = (src - base) >> shift, computed as (-base) + src.
    mov64(dst, -(int64_t)CompressedKlassPointers::base());
    addq(dst, src);
    shrq(dst, CompressedKlassPointers::shift());
    break;
  }
  default:
    ShouldNotReachHere();
  }
}
5119 
// Decodes the narrow klass in r into a full Klass* in place, using the
// decode mode selected for the current encoding base. Clobbers tmp in the
// xor/add modes.
void  MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
  assert_different_registers(r, tmp);
  const uint64_t base_u64 = (uint64_t)CompressedKlassPointers::base();
  switch (klass_decode_mode()) {
  case KlassDecodeZero: {
    // No base: ptr = narrow << shift.
    shlq(r, CompressedKlassPointers::shift());
    break;
  }
  case KlassDecodeXor: {
    assert((base_u64 & (KlassEncodingMetaspaceMax - 1)) == 0,
           "base " UINT64_FORMAT_X " invalid for xor mode", base_u64); // should have been handled at VM init.
    shlq(r, CompressedKlassPointers::shift());
    mov64(tmp, base_u64);
    xorq(r, tmp);
    break;
  }
  case KlassDecodeAdd: {
    // ptr = (narrow << shift) + base.
    shlq(r, CompressedKlassPointers::shift());
    mov64(tmp, base_u64);
    addq(r, tmp);
    break;
  }
  default:
    ShouldNotReachHere();
  }
}
5146 
// Decodes the narrow klass in src into a full Klass* in dst (src preserved),
// using the decode mode selected for the current encoding base.
void  MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) {
  assert_different_registers(src, dst);
  // Note: Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.

  const uint64_t base_u64 = (uint64_t)CompressedKlassPointers::base();

  switch (klass_decode_mode()) {
  case KlassDecodeZero: {
    // No base: dst = src << shift.
    movq(dst, src);
    shlq(dst, CompressedKlassPointers::shift());
    break;
  }
  case KlassDecodeXor: {
    assert((base_u64 & (KlassEncodingMetaspaceMax - 1)) == 0,
           "base " UINT64_FORMAT_X " invalid for xor mode", base_u64); // should have been handled at VM init.
    // Combine before shifting: dst = ((base >> shift) ^ src) << shift.
    const uint64_t base_right_shifted = base_u64 >> CompressedKlassPointers::shift();
    mov64(dst, base_right_shifted);
    xorq(dst, src);
    shlq(dst, CompressedKlassPointers::shift());
    break;
  }
  case KlassDecodeAdd: {
    assert((base_u64 & (KlassAlignmentInBytes - 1)) == 0,
           "base " UINT64_FORMAT_X " invalid for add mode", base_u64); // should have been handled at VM init.
    // Base is shiftable without loss, so: dst = ((base >> shift) + src) << shift.
    const uint64_t base_right_shifted = base_u64 >> CompressedKlassPointers::shift();
    mov64(dst, base_right_shifted);
    addq(dst, src);
    shlq(dst, CompressedKlassPointers::shift());
    break;
  }
  default:
    ShouldNotReachHere();
  }
}
5183 
// Materializes the narrow oop for obj as an immediate in dst, recording an
// oop relocation for the embedded constant.
void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  mov_narrow_oop(dst, oop_index, rspec);
}
5192 
// Stores the narrow oop for obj to memory at dst as an immediate, recording
// an oop relocation for the embedded constant.
void  MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  mov_narrow_oop(dst, oop_index, rspec);
}
< prev index next >