src/hotspot/cpu/x86/macroAssembler_x86.cpp (old version)

  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.hpp"
  27 #include "asm/assembler.inline.hpp"
  28 #include "compiler/compiler_globals.hpp"
  29 #include "compiler/disassembler.hpp"
  30 #include "crc32c.h"
  31 #include "gc/shared/barrierSet.hpp"
  32 #include "gc/shared/barrierSetAssembler.hpp"
  33 #include "gc/shared/collectedHeap.inline.hpp"
  34 #include "gc/shared/tlab_globals.hpp"
  35 #include "interpreter/bytecodeHistogram.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "jvm.h"
  38 #include "memory/resourceArea.hpp"
  39 #include "memory/universe.hpp"
  40 #include "oops/accessDecorators.hpp"
  41 #include "oops/compressedOops.inline.hpp"
  42 #include "oops/klass.inline.hpp"
  43 #include "prims/methodHandles.hpp"
  44 #include "runtime/continuation.hpp"
  45 #include "runtime/flags/flagSetting.hpp"
  46 #include "runtime/interfaceSupport.inline.hpp"
  47 #include "runtime/javaThread.hpp"
  48 #include "runtime/jniHandles.hpp"
  49 #include "runtime/objectMonitor.hpp"
  50 #include "runtime/os.hpp"
  51 #include "runtime/safepoint.hpp"
  52 #include "runtime/safepointMechanism.hpp"
  53 #include "runtime/sharedRuntime.hpp"
  54 #include "runtime/stubRoutines.hpp"
  55 #include "utilities/macros.hpp"
  56 
  57 #ifdef PRODUCT
  58 #define BLOCK_COMMENT(str) /* nothing */
  59 #define STOP(error) stop(error)
  60 #else

4153 
4154   int restore_offset;
4155   if (offset == -1) {
4156     restore_offset = restore_size - gp_reg_size;
4157   } else {
4158     restore_offset = offset + restore_size - gp_reg_size;
4159   }
4160   for (ReverseRegSetIterator<Register> it = set.rbegin(); *it != noreg; ++it) {
4161     movptr(*it, Address(rsp, restore_offset));
4162     restore_offset -= gp_reg_size;
4163   }
4164 
4165   if (offset == -1) {
4166     addptr(rsp, aligned_size);
4167   }
4168 }
4169 
4170 // Preserves the contents of address; destroys the contents of length_in_bytes and temp.
4171 void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) {
4172   assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different");
4173   assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord");
4174   Label done;
4175 
4176   testptr(length_in_bytes, length_in_bytes);
4177   jcc(Assembler::zero, done);
4178 
4179   // initialize topmost word, divide index by 2, check if odd and test if zero
4180   // note: for the remaining code to work, index must be a multiple of BytesPerWord
4181 #ifdef ASSERT
4182   {
4183     Label L;
4184     testptr(length_in_bytes, BytesPerWord - 1);
4185     jcc(Assembler::zero, L);
4186     stop("length must be a multiple of BytesPerWord");
4187     bind(L);
4188   }
4189 #endif
4190   Register index = length_in_bytes;
4191   xorptr(temp, temp);    // use _zero reg to clear memory (shorter code)
4192   if (UseIncDec) {
4193     shrptr(index, 3);  // divide by 8/16 and set carry flag if bit 2 was set
4194   } else {
4195     shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
4196     shrptr(index, 1);
4197   }
4198 #ifndef _LP64
4199   // index might not have been a multiple of 8 (i.e., bit 2 was set)
4200   {
4201     Label even;
4202     // note: if index was a multiple of 8, then it cannot
4203     //       be 0 now otherwise it must have been 0 before
4204     //       => if it is even, we don't need to check for 0 again
4205     jcc(Assembler::carryClear, even);
4206     // clear topmost word (no jump would be needed if conditional assignment worked here)
4207     movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp);
4208     // index could be 0 now, must check again
4209     jcc(Assembler::zero, done);
4210     bind(even);
4211   }
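For reference, the shrptr by 3 above does double duty: it divides the byte count into pairs of 32-bit words and leaves bit 2 of the count in the carry flag, which tells the code whether one odd word remains. A minimal C++ model of that arithmetic (word_pairs is a hypothetical helper, not VM code; assumes the 32-bit layout where BytesPerWord == 4):

    #include <cstddef>

    // Returns the number of word pairs; odd_word reports what the shift
    // leaves in CF, i.e. whether a single dangling word must be cleared.
    static size_t word_pairs(size_t length_in_bytes, bool& odd_word) {
      odd_word = (length_in_bytes & 4) != 0;  // bit 2, shifted into CF
      return length_in_bytes >> 3;            // length / 8
    }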

5103 
5104 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
5105   // get mirror
5106   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
5107   load_method_holder(mirror, method);
5108   movptr(mirror, Address(mirror, mirror_offset));
5109   resolve_oop_handle(mirror, tmp);
5110 }
5111 
5112 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
5113   load_method_holder(rresult, rmethod);
5114   movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
5115 }
5116 
5117 void MacroAssembler::load_method_holder(Register holder, Register method) {
5118   movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
5119   movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
5120   movptr(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes())); // InstanceKlass*
5121 }
5122 
5123 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
5124   assert_different_registers(src, tmp);
5125   assert_different_registers(dst, tmp);
5126 #ifdef _LP64
5127   if (UseCompressedClassPointers) {
5128     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5129     decode_klass_not_null(dst, tmp);
5130   } else
5131 #endif
5132     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5133 }
5134 
5135 void MacroAssembler::load_klass_check_null(Register dst, Register src, Register tmp) {
5136   null_check(src, oopDesc::klass_offset_in_bytes());
5137   load_klass(dst, src, tmp);
5138 }
5139 
5140 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
5141   assert_different_registers(src, tmp);
5142   assert_different_registers(dst, tmp);
5143 #ifdef _LP64
5144   if (UseCompressedClassPointers) {
5145     encode_klass_not_null(src, tmp);
5146     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
5147   } else
5148 #endif
5149     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
5150 }
5151 
5152 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
5153                                     Register tmp1, Register thread_tmp) {
5154   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5155   decorators = AccessInternal::decorator_fixup(decorators, type);
5156   bool as_raw = (decorators & AS_RAW) != 0;
5157   if (as_raw) {
5158     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
5159   } else {
5160     bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
5161   }
5162 }
5163 
5164 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
5165                                      Register tmp1, Register tmp2, Register tmp3) {
5166   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5167   decorators = AccessInternal::decorator_fixup(decorators, type);
5168   bool as_raw = (decorators & AS_RAW) != 0;
5169   if (as_raw) {

5336     assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5337     if (LogMinObjAlignmentInBytes == Address::times_8) {
5338       leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
5339     } else {
5340       if (dst != src) {
5341         movq(dst, src);
5342       }
5343       shlq(dst, LogMinObjAlignmentInBytes);
5344       if (CompressedOops::base() != NULL) {
5345         addq(dst, r12_heapbase);
5346       }
5347     }
5348   } else {
5349     assert (CompressedOops::base() == NULL, "sanity");
5350     if (dst != src) {
5351       movq(dst, src);
5352     }
5353   }
5354 }
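The decode logic above computes oop = heap_base + (narrow << shift), folding the shift and add into a single leaq when the shift matches the times_8 scale; with a NULL base the add is skipped entirely. A plain-C++ sketch of the same mapping (decode_oop is illustrative, not VM code):

    #include <cstdint>

    // Sketch: maps a 32-bit narrow oop back to a full pointer.
    static uintptr_t decode_oop(uint32_t narrow, uintptr_t heap_base, int shift) {
      uintptr_t offset = (uintptr_t)narrow << shift;   // shlq
      return heap_base ? heap_base + offset            // addq r12_heapbase
                       : offset;                       // zero-based heap
    }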
5355 
5356 void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
5357   assert_different_registers(r, tmp);
5358   if (CompressedKlassPointers::base() != NULL) {
5359     mov64(tmp, (int64_t)CompressedKlassPointers::base());
5360     subq(r, tmp);
5361   }
5362   if (CompressedKlassPointers::shift() != 0) {
5363     assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
5364     shrq(r, LogKlassAlignmentInBytes);
5365   }
5366 }
5367 
5368 void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) {
5369   assert_different_registers(src, dst);
5370   if (CompressedKlassPointers::base() != NULL) {
5371     mov64(dst, -(int64_t)CompressedKlassPointers::base());
5372     addq(dst, src);
5373   } else {
5374     movptr(dst, src);
5375   }
5376   if (CompressedKlassPointers::shift() != 0) {
5377     assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
5378     shrq(dst, LogKlassAlignmentInBytes);
5379   }
5380 }
5381 
5382 void  MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
5383   assert_different_registers(r, tmp);
5384   // Note: it will change flags
5385   assert(UseCompressedClassPointers, "should only be used for compressed headers");
5386   // Cannot assert, unverified entry point counts instructions (see .ad file)
5387   // vtableStubs also counts instructions in pd_code_size_limit.
5388   // Also do not verify_oop as this is called by verify_oop.
5389   if (CompressedKlassPointers::shift() != 0) {
5390     assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
5391     shlq(r, LogKlassAlignmentInBytes);
5392   }
5393   if (CompressedKlassPointers::base() != NULL) {
5394     mov64(tmp, (int64_t)CompressedKlassPointers::base());
5395     addq(r, tmp);
5396   }
5397 }
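In this older version, narrow Klass encoding is a plain subtract-and-shift and decoding is its inverse: narrow = (k - base) >> shift and k = (narrow << shift) + base. The new version later on this page replaces this with the mode-based scheme. A round-trip sketch (illustrative helpers, not VM code):

    #include <cstdint>

    static uint32_t encode_klass(uintptr_t k, uintptr_t base, int shift) {
      return (uint32_t)((k - base) >> shift);          // subq; shrq
    }

    static uintptr_t decode_klass(uint32_t n, uintptr_t base, int shift) {
      return ((uintptr_t)n << shift) + base;           // shlq; addq
    }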
5398 
5399 void  MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) {
5400   assert_different_registers(src, dst);
5401   // Note: it will change flags
5402   assert (UseCompressedClassPointers, "should only be used for compressed headers");
5403   // Cannot assert, unverified entry point counts instructions (see .ad file)
5404   // vtableStubs also counts instructions in pd_code_size_limit.
5405   // Also do not verify_oop as this is called by verify_oop.
5406 
5407   if (CompressedKlassPointers::base() == NULL &&
5408       CompressedKlassPointers::shift() == 0) {
5409     // The best case scenario is that there is no base or shift. Then it is already
5410     // a pointer that needs nothing but a register rename.
5411     movl(dst, src);
5412   } else {
5413     if (CompressedKlassPointers::base() != NULL) {
5414       mov64(dst, (int64_t)CompressedKlassPointers::base());
5415     } else {
5416       xorq(dst, dst);
5417     }
5418     if (CompressedKlassPointers::shift() != 0) {
5419       assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
5420       assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
5421       leaq(dst, Address(dst, src, Address::times_8, 0));
5422     } else {
5423       addq(dst, src);
5424     }
5425   }
5426 }
5427 
5428 void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
5429   assert (UseCompressedOops, "should only be used for compressed headers");
5430   assert (Universe::heap() != NULL, "java heap should be initialized");
5431   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
5432   int oop_index = oop_recorder()->find_index(obj);
5433   RelocationHolder rspec = oop_Relocation::spec(oop_index);
5434   mov_narrow_oop(dst, oop_index, rspec);
5435 }
5436 
5437 void  MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
5438   assert (UseCompressedOops, "should only be used for compressed headers");
5439   assert (Universe::heap() != NULL, "java heap should be initialized");
5440   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
5441   int oop_index = oop_recorder()->find_index(obj);
5442   RelocationHolder rspec = oop_Relocation::spec(oop_index);
5443   mov_narrow_oop(dst, oop_index, rspec);
5444 }

9659 }
9660 
9661 
9662 #endif // !WIN32 || _LP64
9663 
9664 void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigned bias, Register tmp) {
9665   Label L_stack_ok;
9666   if (bias == 0) {
9667     testptr(sp, 2 * wordSize - 1);
9668   } else {
9669     // lea(tmp, Address(rsp, bias));
9670     mov(tmp, sp);
9671     addptr(tmp, bias);
9672     testptr(tmp, 2 * wordSize - 1);
9673   }
9674   jcc(Assembler::equal, L_stack_ok);
9675   block_comment(msg);
9676   stop(msg);
9677   bind(L_stack_ok);
9678 }

src/hotspot/cpu/x86/macroAssembler_x86.cpp (new version)

  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.hpp"
  27 #include "asm/assembler.inline.hpp"
  28 #include "compiler/compiler_globals.hpp"
  29 #include "compiler/disassembler.hpp"
  30 #include "crc32c.h"
  31 #include "gc/shared/barrierSet.hpp"
  32 #include "gc/shared/barrierSetAssembler.hpp"
  33 #include "gc/shared/collectedHeap.inline.hpp"
  34 #include "gc/shared/tlab_globals.hpp"
  35 #include "interpreter/bytecodeHistogram.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "jvm.h"
  38 #include "memory/resourceArea.hpp"
  39 #include "memory/universe.hpp"
  40 #include "oops/accessDecorators.hpp"
  41 #include "oops/compressedKlass.inline.hpp"
  42 #include "oops/compressedOops.inline.hpp"
  43 #include "oops/klass.inline.hpp"
  44 #include "prims/methodHandles.hpp"
  45 #include "runtime/continuation.hpp"
  46 #include "runtime/flags/flagSetting.hpp"
  47 #include "runtime/interfaceSupport.inline.hpp"
  48 #include "runtime/javaThread.hpp"
  49 #include "runtime/jniHandles.hpp"
  50 #include "runtime/objectMonitor.hpp"
  51 #include "runtime/os.hpp"
  52 #include "runtime/safepoint.hpp"
  53 #include "runtime/safepointMechanism.hpp"
  54 #include "runtime/sharedRuntime.hpp"
  55 #include "runtime/stubRoutines.hpp"
  56 #include "utilities/macros.hpp"
  57 
  58 #ifdef PRODUCT
  59 #define BLOCK_COMMENT(str) /* nothing */
  60 #define STOP(error) stop(error)
  61 #else

4154 
4155   int restore_offset;
4156   if (offset == -1) {
4157     restore_offset = restore_size - gp_reg_size;
4158   } else {
4159     restore_offset = offset + restore_size - gp_reg_size;
4160   }
4161   for (ReverseRegSetIterator<Register> it = set.rbegin(); *it != noreg; ++it) {
4162     movptr(*it, Address(rsp, restore_offset));
4163     restore_offset -= gp_reg_size;
4164   }
4165 
4166   if (offset == -1) {
4167     addptr(rsp, aligned_size);
4168   }
4169 }
4170 
4171 // Preserves the contents of address; destroys the contents of length_in_bytes and temp.
4172 void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) {
4173   assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different");
4174   assert((offset_in_bytes & (BytesPerInt - 1)) == 0, "offset must be a multiple of BytesPerInt");
4175   Label done;
4176 
4177   testptr(length_in_bytes, length_in_bytes);
4178   jcc(Assembler::zero, done);
4179 
4180   // Emit a single 32-bit store to clear leading bytes, if necessary.
4181   xorptr(temp, temp);    // use _zero reg to clear memory (shorter code)
4182 #ifdef _LP64
4183   if (!is_aligned(offset_in_bytes, BytesPerWord)) {
4184     movl(Address(address, offset_in_bytes), temp);
4185     offset_in_bytes += BytesPerInt;
4186     decrement(length_in_bytes, BytesPerInt);
4187   }
4188   assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord");
4189   testptr(length_in_bytes, length_in_bytes);
4190   jcc(Assembler::zero, done);
4191 #endif
4192 
4193   // initialize topmost word, divide index by 2, check if odd and test if zero
4194   // note: for the remaining code to work, index must be a multiple of BytesPerWord
4195 #ifdef ASSERT
4196   {
4197     Label L;
4198     testptr(length_in_bytes, BytesPerWord - 1);
4199     jcc(Assembler::zero, L);
4200     stop("length must be a multiple of BytesPerWord");
4201     bind(L);
4202   }
4203 #endif
4204   Register index = length_in_bytes;
4205   if (UseIncDec) {
4206     shrptr(index, 3);  // divide by 8/16 and set carry flag if bit 2 was set
4207   } else {
4208     shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
4209     shrptr(index, 1);
4210   }
4211 #ifndef _LP64
4212   // index might not have been a multiple of 8 (i.e., bit 2 was set)
4213   {
4214     Label even;
4215     // note: if index was a multiple of 8, then it cannot
4216     //       be 0 now otherwise it must have been 0 before
4217     //       => if it is even, we don't need to check for 0 again
4218     jcc(Assembler::carryClear, even);
4219     // clear topmost word (no jump would be needed if conditional assignment worked here)
4220     movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp);
4221     // index could be 0 now, must check again
4222     jcc(Assembler::zero, done);
4223     bind(even);
4224   }
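The leading-store block added above (new lines 4180-4191) handles a start offset that is int- but not word-aligned: one 32-bit store consumes the odd int, after which the offset and remaining length are word-aligned again and the word loop can proceed. A standalone C++ model (align_prefix is hypothetical, 64-bit layout assumed):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    static void align_prefix(char* base, int& offset, size_t& length) {
      if ((offset & 7) != 0) {                 // !is_aligned(offset, BytesPerWord)
        uint32_t zero = 0;
        std::memcpy(base + offset, &zero, 4);  // movl(Address(address, offset), temp)
        offset += 4;                           // BytesPerInt
        length -= 4;                           // decrement(length_in_bytes, BytesPerInt)
      }
    }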

5116 
5117 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
5118   // get mirror
5119   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
5120   load_method_holder(mirror, method);
5121   movptr(mirror, Address(mirror, mirror_offset));
5122   resolve_oop_handle(mirror, tmp);
5123 }
5124 
5125 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
5126   load_method_holder(rresult, rmethod);
5127   movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
5128 }
5129 
5130 void MacroAssembler::load_method_holder(Register holder, Register method) {
5131   movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
5132   movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
5133   movptr(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes())); // InstanceKlass*
5134 }
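The three dependent loads above walk the metadata chain from a method to its holder class; in C++ terms this is roughly (a sketch using the accessors these offsets correspond to):

    static InstanceKlass* holder_of(Method* m) {
      return m->constMethod()     // Method::const_offset()
              ->constants()       // ConstMethod::constants_offset()
              ->pool_holder();    // ConstantPool::pool_holder_offset_in_bytes()
    }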
5135 
5136 #ifdef _LP64
5137 void MacroAssembler::load_nklass(Register dst, Register src) {
5138   assert(UseCompressedClassPointers, "expect compressed class pointers");
5139 
5140   if (!UseCompactObjectHeaders) {
5141     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5142     return;
5143   }
5144 
5145   Label fast;
5146   movq(dst, Address(src, oopDesc::mark_offset_in_bytes()));
5147   testb(dst, markWord::monitor_value);
5148   jccb(Assembler::zero, fast);
5149 
5150   // Fetch displaced header
5151   movq(dst, Address(dst, OM_OFFSET_NO_MONITOR_VALUE_TAG(header)));
5152 
5153   bind(fast);
5154   shrq(dst, markWord::klass_shift);
5155 }
5156 #endif
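load_nklass reflects the compact-header layout: the narrow Klass pointer lives in the upper bits of the mark word, so a plain shift recovers it, unless the monitor bit is set, in which case the real header has been displaced into the ObjectMonitor and must be fetched from there first. A C++ model of the decoding (ObjectMonitorModel is a stand-in for the OM_OFFSET_NO_MONITOR_VALUE_TAG(header) load; all names here are illustrative):

    #include <cstdint>

    struct ObjectMonitorModel { uint64_t header; };  // displaced mark word

    static uint32_t narrow_klass_of(uint64_t mark, const ObjectMonitorModel* mon,
                                    uint64_t monitor_value, int klass_shift) {
      if (mark & monitor_value) {     // inflated: header lives in the monitor
        mark = mon->header;           // "fetch displaced header"
      }
      return (uint32_t)(mark >> klass_shift);
    }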
5157 
5158 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
5159   assert_different_registers(src, tmp);
5160   assert_different_registers(dst, tmp);
5161 #ifdef _LP64
5162   if (UseCompressedClassPointers) {
5163     load_nklass(dst, src);
5164     decode_klass_not_null(dst, tmp);
5165   } else
5166 #endif
5167     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5168 }
5169 
5170 void MacroAssembler::load_klass_check_null(Register dst, Register src, Register tmp) {
5171   if (UseCompactObjectHeaders) {
5172     null_check(src, oopDesc::mark_offset_in_bytes());
5173   } else {
5174     null_check(src, oopDesc::klass_offset_in_bytes());
5175   }
5176   load_klass(dst, src, tmp);
5177 }
5178 
5179 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
5180   assert(!UseCompactObjectHeaders, "not with compact headers");
5181   assert_different_registers(src, tmp);
5182   assert_different_registers(dst, tmp);
5183 #ifdef _LP64
5184   if (UseCompressedClassPointers) {
5185     encode_klass_not_null(src, tmp);
5186     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
5187   } else
5188 #endif
5189     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
5190 }
5191 
5192 void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
5193 #ifdef _LP64
5194   if (UseCompactObjectHeaders) {
5195     // NOTE: We need to deal with possible ObjectMonitor in object header.
5196     // Eventually we might be able to do simple movl & cmpl like in
5197     // the CCP path below.
5198     load_nklass(tmp, obj);
5199     cmpl(klass, tmp);
5200   } else if (UseCompressedClassPointers) {
5201     cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
5202   } else
5203 #endif
5204   {
5205     cmpptr(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
5206   }
5207 }
5208 
5209 void MacroAssembler::cmp_klass(Register src, Register dst, Register tmp1, Register tmp2) {
5210 #ifdef _LP64
5211   if (UseCompactObjectHeaders) {
5212     // NOTE: We need to deal with possible ObjectMonitor in object header.
5213     // Eventually we might be able to do simple movl & cmpl like in
5214     // the CCP path below.
5215     assert(tmp2 != noreg, "need tmp2");
5216     assert_different_registers(src, dst, tmp1, tmp2);
5217     load_nklass(tmp1, src);
5218     load_nklass(tmp2, dst);
5219     cmpl(tmp1, tmp2);
5220   } else if (UseCompressedClassPointers) {
5221     movl(tmp1, Address(src, oopDesc::klass_offset_in_bytes()));
5222     cmpl(tmp1, Address(dst, oopDesc::klass_offset_in_bytes()));
5223   } else
5224 #endif
5225   {
5226     movptr(tmp1, Address(src, oopDesc::klass_offset_in_bytes()));
5227     cmpptr(tmp1, Address(dst, oopDesc::klass_offset_in_bytes()));
5228   }
5229 }
5230 
5231 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
5232                                     Register tmp1, Register thread_tmp) {
5233   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5234   decorators = AccessInternal::decorator_fixup(decorators, type);
5235   bool as_raw = (decorators & AS_RAW) != 0;
5236   if (as_raw) {
5237     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
5238   } else {
5239     bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
5240   }
5241 }
5242 
5243 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
5244                                      Register tmp1, Register tmp2, Register tmp3) {
5245   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5246   decorators = AccessInternal::decorator_fixup(decorators, type);
5247   bool as_raw = (decorators & AS_RAW) != 0;
5248   if (as_raw) {

5415     assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5416     if (LogMinObjAlignmentInBytes == Address::times_8) {
5417       leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
5418     } else {
5419       if (dst != src) {
5420         movq(dst, src);
5421       }
5422       shlq(dst, LogMinObjAlignmentInBytes);
5423       if (CompressedOops::base() != NULL) {
5424         addq(dst, r12_heapbase);
5425       }
5426     }
5427   } else {
5428     assert (CompressedOops::base() == NULL, "sanity");
5429     if (dst != src) {
5430       movq(dst, src);
5431     }
5432   }
5433 }
5434 
5435 MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode = KlassDecodeNone;
5436 
5437 // Returns a static string
5438 const char* MacroAssembler::describe_klass_decode_mode(MacroAssembler::KlassDecodeMode mode) {
5439   switch (mode) {
5440   case KlassDecodeNone: return "none";
5441   case KlassDecodeZero: return "zero";
5442   case KlassDecodeXor:  return "xor";
5443   case KlassDecodeAdd:  return "add";
5444   default:
5445     ShouldNotReachHere();
5446   }
5447   return NULL;
5448 }
5449 
5450 // Return the current narrow Klass pointer decode mode.
5451 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
5452   if (_klass_decode_mode == KlassDecodeNone) {
5453     // First time initialization
5454     assert(UseCompressedClassPointers, "not using compressed class pointers");
5455     assert(Metaspace::initialized(), "metaspace not initialized yet");
5456 
5457     _klass_decode_mode = klass_decode_mode_for_base(CompressedKlassPointers::base());
5458     guarantee(_klass_decode_mode != KlassDecodeNone,
5459               PTR_FORMAT " is not a valid encoding base on x86",
5460               p2i(CompressedKlassPointers::base()));
5461     log_info(metaspace)("klass decode mode initialized: %s", describe_klass_decode_mode(_klass_decode_mode));
5462   }
5463   return _klass_decode_mode;
5464 }
5465 
5466 // Given an arbitrary base address, return the KlassDecodeMode that would be used. Return KlassDecodeNone
5467 // if base address is not valid for encoding.
5468 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode_for_base(address base) {
5469   assert(CompressedKlassPointers::shift() != 0, "not lilliput?");
5470 
5471   const uint64_t base_u64 = (uint64_t) base;
5472 
5473   if (base_u64 == 0) {
5474     return KlassDecodeZero;
5475   }
5476 
5477   if ((base_u64 & (KlassEncodingMetaspaceMax - 1)) == 0) {
5478     return KlassDecodeXor;
5479   }
5480 
5481   // Note that there is no point in optimizing for shift=3 since lilliput
5482   // will use larger shifts
5483 
5484   // The add+shift mode for decode_and_move_klass_not_null() requires the base to be
5485   //  shiftable-without-loss. So, this is the minimum restriction on x64 for a valid
5486   //  encoding base. This does not matter in reality since the shift values we use for
5487   //  Lilliput, while large, won't be larger than a page size. And the encoding base
5488   //  will be quite likely page aligned since it usually falls to the beginning of
5489   //  either CDS or CCS.
5490   if ((base_u64 & (KlassAlignmentInBytes - 1)) == 0) {
5491     return KlassDecodeAdd;
5492   }
5493 
5494   return KlassDecodeNone;
5495 }
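As a worked example of the selection above (illustrative values only, assuming KlassEncodingMetaspaceMax == 4 GB and a 1 KB klass alignment):

      base 0x0            -> KlassDecodeZero  (decode is a bare shift)
      base 0x1_0000_0000  -> KlassDecodeXor   (4 GB aligned: base and the
                                               shifted narrow value share no
                                               bits, so xor splices them)
      base 0x1_2345_6400  -> KlassDecodeAdd   (only klass-aligned: needs a
                                               real add, but survives the
                                               shift without losing bits)
      base 0x1_2345_6789  -> KlassDecodeNone  (not shiftable without loss;
                                               the VM must pick another base)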
5496 
5497 void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
5498   assert_different_registers(r, tmp);
5499   switch (klass_decode_mode()) {
5500   case KlassDecodeZero: {
5501     shrq(r, CompressedKlassPointers::shift());
5502     break;
5503   }
5504   case KlassDecodeXor: {
5505     mov64(tmp, (int64_t)CompressedKlassPointers::base());
5506     xorq(r, tmp);
5507     shrq(r, CompressedKlassPointers::shift());
5508     break;
5509   }
5510   case KlassDecodeAdd: {
5511     mov64(tmp, (int64_t)CompressedKlassPointers::base());
5512     subq(r, tmp);
5513     shrq(r, CompressedKlassPointers::shift());
5514     break;
5515   }
5516   default:
5517     ShouldNotReachHere();
5518   }
5519 }
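The xor in KlassDecodeXor works because the base is aligned to KlassEncodingMetaspaceMax while every Klass* lies within [base, base + KlassEncodingMetaspaceMax): base has no bits set inside the encoding range, so no borrow can cross and xor equals subtraction. Worked example (illustrative values):

      base = 0x1_0000_0000, k = 0x1_0000_5678
      k - base = 0x5678
      k ^ base = 0x5678      // same result, and xor needs no carry chain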
5520 
5521 void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) {
5522   assert_different_registers(src, dst);
5523   switch (klass_decode_mode()) {
5524   case KlassDecodeZero: {
5525     movptr(dst, src);
5526     shrq(dst, CompressedKlassPointers::shift());
5527     break;
5528   }
5529   case KlassDecodeXor: {
5530     mov64(dst, (int64_t)CompressedKlassPointers::base());
5531     xorq(dst, src);
5532     shrq(dst, CompressedKlassPointers::shift());
5533     break;
5534   }
5535   case KlassDecodeAdd: {
5536     mov64(dst, -(int64_t)CompressedKlassPointers::base());
5537     addq(dst, src);
5538     shrq(dst, CompressedKlassPointers::shift());
5539     break;
5540   }
5541   default:
5542     ShouldNotReachHere();
5543   }
5544 }
5545 
5546 void  MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
5547   assert_different_registers(r, tmp);
5548   const uint64_t base_u64 = (uint64_t)CompressedKlassPointers::base();
5549   switch (klass_decode_mode()) {
5550   case KlassDecodeZero: {
5551     shlq(r, CompressedKlassPointers::shift());
5552     break;
5553   }
5554   case KlassDecodeXor: {
5555     assert((base_u64 & (KlassEncodingMetaspaceMax - 1)) == 0,
5556            "base " UINT64_FORMAT_X " invalid for xor mode", base_u64); // should have been handled at VM init.
5557     shlq(r, CompressedKlassPointers::shift());
5558     mov64(tmp, base_u64);
5559     xorq(r, tmp);
5560     break;
5561   }
5562   case KlassDecodeAdd: {
5563     shlq(r, CompressedKlassPointers::shift());
5564     mov64(tmp, base_u64);
5565     addq(r, tmp);
5566     break;
5567   }
5568   default:
5569     ShouldNotReachHere();
5570   }
5571 }
5572 
5573 void  MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) {
5574   assert_different_registers(src, dst);
5575   // Note: Cannot assert, unverified entry point counts instructions (see .ad file)
5576   // vtableStubs also counts instructions in pd_code_size_limit.
5577   // Also do not verify_oop as this is called by verify_oop.
5578 
5579   const uint64_t base_u64 = (uint64_t)CompressedKlassPointers::base();
5580 
5581   switch (klass_decode_mode()) {
5582   case KlassDecodeZero: {
5583     movq(dst, src);
5584     shlq(dst, CompressedKlassPointers::shift());
5585     break;
5586   }
5587   case KlassDecodeXor: {
5588     assert((base_u64 & (KlassEncodingMetaspaceMax - 1)) == 0,
5589            "base " UINT64_FORMAT_X " invalid for xor mode", base_u64); // should have been handled at VM init.
5590     const uint64_t base_right_shifted = base_u64 >> CompressedKlassPointers::shift();
5591     mov64(dst, base_right_shifted);
5592     xorq(dst, src);
5593     shlq(dst, CompressedKlassPointers::shift());
5594     break;
5595   }
5596   case KlassDecodeAdd: {
5597     assert((base_u64 & (KlassAlignmentInBytes - 1)) == 0,
5598            "base " UINT64_FORMAT_X " invalid for add mode", base_u64); // should have been handled at VM init.
5599     const uint64_t base_right_shifted = base_u64 >> CompressedKlassPointers::shift();
5600     mov64(dst, base_right_shifted);
5601     addq(dst, src);
5602     shlq(dst, CompressedKlassPointers::shift());
5603     break;
5604   }
5605   default:
5606     ShouldNotReachHere();
5607   }
5608 }
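Pre-shifting the base lets the decode fold the final shlq over both operands: ((base >> s) ^ n) << s == base + (n << s), as long as the low s bits of base are zero, which both the xor and add alignment requirements guarantee. Worked example (illustrative values, s = 10):

      base = 0x1_0000_0000, n = 0x159e
      base >> 10           = 0x40_0000
      0x40_0000 ^ 0x159e   = 0x40_159e
      0x40_159e << 10      = 0x1_0056_7800
      base + (n << 10)     = 0x1_0056_7800   // identical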
5609 
5610 void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
5611   assert (UseCompressedOops, "should only be used for compressed headers");
5612   assert (Universe::heap() != NULL, "java heap should be initialized");
5613   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
5614   int oop_index = oop_recorder()->find_index(obj);
5615   RelocationHolder rspec = oop_Relocation::spec(oop_index);
5616   mov_narrow_oop(dst, oop_index, rspec);
5617 }
5618 
5619 void  MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
5620   assert (UseCompressedOops, "should only be used for compressed headers");
5621   assert (Universe::heap() != NULL, "java heap should be initialized");
5622   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
5623   int oop_index = oop_recorder()->find_index(obj);
5624   RelocationHolder rspec = oop_Relocation::spec(oop_index);
5625   mov_narrow_oop(dst, oop_index, rspec);
5626 }

9841 }
9842 
9843 
9844 #endif // !WIN32 || _LP64
9845 
9846 void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigned bias, Register tmp) {
9847   Label L_stack_ok;
9848   if (bias == 0) {
9849     testptr(sp, 2 * wordSize - 1);
9850   } else {
9851     // lea(tmp, Address(rsp, bias));
9852     mov(tmp, sp);
9853     addptr(tmp, bias);
9854     testptr(tmp, 2 * wordSize - 1);
9855   }
9856   jcc(Assembler::equal, L_stack_ok);
9857   block_comment(msg);
9858   stop(msg);
9859   bind(L_stack_ok);
9860 }
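The test above verifies 16-byte alignment of sp on 64-bit (2 * wordSize == 16), optionally displaced by a bias. It is equivalent to this C++ predicate (a sketch, not VM code):

    #include <cstdint>

    static bool stack_aligned(uintptr_t sp, unsigned bias) {
      return ((sp + bias) & (2 * sizeof(void*) - 1)) == 0;
    }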
9861 
9862 void MacroAssembler::fast_lock_impl(Register obj, Register hdr, Register thread, Register tmp, Label& slow, bool rt_check_stack) {
9863   assert(hdr == rax, "header must be in rax for cmpxchg");
9864   assert_different_registers(obj, hdr, thread, tmp);
9865 
9866   // First we need to check if the lock-stack has room for pushing the object reference.
9867   if (rt_check_stack) {
9868     movptr(tmp, Address(thread, JavaThread::lock_stack_current_offset()));
9869     cmpptr(tmp, Address(thread, JavaThread::lock_stack_limit_offset()));
9870     jcc(Assembler::greaterEqual, slow);
9871   }
9872 #ifdef ASSERT
9873   else {
9874     Label ok;
9875     movptr(tmp, Address(thread, JavaThread::lock_stack_current_offset()));
9876     cmpptr(tmp, Address(thread, JavaThread::lock_stack_limit_offset()));
9877     jcc(Assembler::less, ok);
9878     stop("Not enough room in lock stack; should have been checked in the method prologue");
9879     bind(ok);
9880   }
9881 #endif
9882 
9883   // Now we attempt to take the fast-lock.
9884   // Clear lowest two header bits (locked state).
9885   andptr(hdr, ~(int32_t)markWord::lock_mask_in_place);
9886   movptr(tmp, hdr);
9887   // Set lowest bit (unlocked state).
9888   orptr(hdr, markWord::unlocked_value);
9889   lock();
9890   cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
9891   jcc(Assembler::notEqual, slow);
9892 
9893   // If successful, push object to lock-stack.
9894   movptr(tmp, Address(thread, JavaThread::lock_stack_current_offset()));
9895   movptr(Address(tmp, 0), obj);
9896   increment(tmp, oopSize);
9897   movptr(Address(thread, JavaThread::lock_stack_current_offset()), tmp);
9898 }
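In pseudo-C++, the fast-lock path above attempts a single compare-and-swap of the mark word from "unlocked" (low bits 01) to "locked" (low bits 00) and, on success, pushes the object on the per-thread lock stack (a sketch with hypothetical cas and lock_stack_current helpers; the real code must keep the expected value in rax for cmpxchg):

    uintptr_t locked   = mark & ~markWord::lock_mask_in_place;  // ....00
    uintptr_t unlocked = locked | markWord::unlocked_value;     // ....01
    if (!cas(obj->mark_addr(), /*expected*/ unlocked, /*new*/ locked)) {
      goto slow;                    // contended, inflated, or already locked
    }
    *lock_stack_current++ = obj;    // push; bounds were checked above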
9899 
9900 void MacroAssembler::fast_unlock_impl(Register obj, Register hdr, Register tmp, Label& slow) {
9901   assert(hdr == rax, "header must be in rax for cmpxchg");
9902   assert_different_registers(obj, hdr, tmp);
9903 
9904   // Mark-word must be 00 now, try to swing it back to 01 (unlocked)
9905   movptr(tmp, hdr); // The expected old value
9906   orptr(tmp, markWord::unlocked_value);
9907   lock();
9908   cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
9909   jcc(Assembler::notEqual, slow);
9910   // Pop the lock object from the lock-stack.
9911 #ifdef _LP64
9912   const Register thread = r15_thread;
9913 #else
9914   const Register thread = rax;
9915   get_thread(rax);
9916 #endif
9917   subptr(Address(thread, JavaThread::lock_stack_current_offset()), oopSize);
9918 }
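Unlocking mirrors that: one compare-and-swap swings the mark word from "locked" (00) back to "unlocked" (01), then the object is popped by decrementing the lock-stack current pointer (pseudo-C++ sketch, same hypothetical helpers as above):

    uintptr_t locked   = hdr;                                   // ....00
    uintptr_t unlocked = locked | markWord::unlocked_value;     // ....01
    if (!cas(obj->mark_addr(), /*expected*/ locked, /*new*/ unlocked)) {
      goto slow;                    // header changed, e.g. inflated meanwhile
    }
    lock_stack_current -= 1;        // pop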