src/hotspot/cpu/x86/macroAssembler_x86.cpp (old version)

   11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   12  * version 2 for more details (a copy is included in the LICENSE file that
   13  * accompanied this code).
   14  *
   15  * You should have received a copy of the GNU General Public License version
   16  * 2 along with this work; if not, write to the Free Software Foundation,
   17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   18  *
   19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   20  * or visit www.oracle.com if you need additional information or have any
   21  * questions.
   22  *
   23  */
   24 
   25 #include "precompiled.hpp"
   26 #include "asm/assembler.hpp"
   27 #include "asm/assembler.inline.hpp"
   28 #include "code/compiledIC.hpp"
   29 #include "compiler/compiler_globals.hpp"
   30 #include "compiler/disassembler.hpp"
   31 #include "crc32c.h"
   32 #include "gc/shared/barrierSet.hpp"
   33 #include "gc/shared/barrierSetAssembler.hpp"
   34 #include "gc/shared/collectedHeap.inline.hpp"
   35 #include "gc/shared/tlab_globals.hpp"
   36 #include "interpreter/bytecodeHistogram.hpp"
   37 #include "interpreter/interpreter.hpp"
   38 #include "jvm.h"
   39 #include "memory/resourceArea.hpp"
   40 #include "memory/universe.hpp"
   41 #include "oops/accessDecorators.hpp"
   42 #include "oops/compressedKlass.inline.hpp"
   43 #include "oops/compressedOops.inline.hpp"
   44 #include "oops/klass.inline.hpp"
   45 #include "prims/methodHandles.hpp"
   46 #include "runtime/continuation.hpp"
   47 #include "runtime/interfaceSupport.inline.hpp"
   48 #include "runtime/javaThread.hpp"
   49 #include "runtime/jniHandles.hpp"
   50 #include "runtime/objectMonitor.hpp"
   51 #include "runtime/os.hpp"
   52 #include "runtime/safepoint.hpp"
   53 #include "runtime/safepointMechanism.hpp"
   54 #include "runtime/sharedRuntime.hpp"
   55 #include "runtime/stubRoutines.hpp"
   56 #include "utilities/checkedCast.hpp"
   57 #include "utilities/macros.hpp"
   58 
   59 #ifdef PRODUCT
   60 #define BLOCK_COMMENT(str) /* nothing */
   61 #define STOP(error) stop(error)
   62 #else
   63 #define BLOCK_COMMENT(str) block_comment(str)
   64 #define STOP(error) block_comment(error); stop(error)
   65 #endif
   66 
   67 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
   68 
   69 #ifdef ASSERT
   70 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
   71 #endif
   72 
   73 static const Assembler::Condition reverse[] = {
   74     Assembler::noOverflow     /* overflow      = 0x0 */ ,
   75     Assembler::overflow       /* noOverflow    = 0x1 */ ,
   76     Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
   77     Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,

 1701 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1702   LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
 1703   LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
 1704   pass_arg2(this, arg_2);
 1705   pass_arg1(this, arg_1);
 1706   pass_arg0(this, arg_0);
 1707   call_VM_leaf(entry_point, 3);
 1708 }
 1709 
 1710 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
 1711   LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3));
 1712   LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
 1713   LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
 1714   pass_arg3(this, arg_3);
 1715   pass_arg2(this, arg_2);
 1716   pass_arg1(this, arg_1);
 1717   pass_arg0(this, arg_0);
 1718   call_VM_leaf(entry_point, 3);
 1719 }
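
        // Note on the staging order above: arguments are moved last-to-first
        // (pass_arg3 .. pass_arg0) so that loading a later argument into its
        // c_rarg register cannot clobber an earlier argument still sitting in
        // one of those registers; the assert_different_registers checks at the
        // top enforce exactly that. Illustrative call site (registers chosen
        // for the sketch only):
        //   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
        //                r15_thread, rbx);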
 1720 
 1721 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
 1722   pass_arg0(this, arg_0);
 1723   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1724 }
 1725 
 1726 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
 1727   LP64_ONLY(assert_different_registers(arg_0, c_rarg1));
 1728   pass_arg1(this, arg_1);
 1729   pass_arg0(this, arg_0);
 1730   MacroAssembler::call_VM_leaf_base(entry_point, 2);
 1731 }
 1732 
 1733 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1734   LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
 1735   LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
 1736   pass_arg2(this, arg_2);
 1737   pass_arg1(this, arg_1);
 1738   pass_arg0(this, arg_0);
 1739   MacroAssembler::call_VM_leaf_base(entry_point, 3);
 1740 }

 2885     lea(rscratch, src);
 2886     Assembler::mulss(dst, Address(rscratch, 0));
 2887   }
 2888 }
 2889 
 2890 void MacroAssembler::null_check(Register reg, int offset) {
 2891   if (needs_explicit_null_check(offset)) {
  2892     // provoke an OS null-pointer exception if reg is null by
  2893     // accessing M[reg] without changing any (non-CC) registers
  2894     // NOTE: a cmpl is sufficient here to provoke the SIGSEGV
 2895     cmpptr(rax, Address(reg, 0));
 2896     // Note: should probably use testl(rax, Address(reg, 0));
 2897     //       may be shorter code (however, this version of
 2898     //       testl needs to be implemented first)
 2899   } else {
 2900     // nothing to do, (later) access of M[reg + offset]
 2901     // will provoke OS null exception if reg is null
 2902   }
 2903 }
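
        // Usage sketch (the offset is illustrative): for a field at a small
        // offset, needs_explicit_null_check(offset) is false because the later
        // access M[reg + offset] still lands in the OS-protected page at
        // address zero, so null_check emits no code at all:
        //   null_check(rbx, 8);  // a later movptr(rax, Address(rbx, 8)) faults if rbx is null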
 2904 
 2905 void MacroAssembler::os_breakpoint() {
  2906   // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
 2907   // (e.g., MSVC can't call ps() otherwise)
 2908   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 2909 }
 2910 
 2911 void MacroAssembler::unimplemented(const char* what) {
 2912   const char* buf = nullptr;
 2913   {
 2914     ResourceMark rm;
 2915     stringStream ss;
 2916     ss.print("unimplemented: %s", what);
 2917     buf = code_string(ss.as_string());
 2918   }
 2919   stop(buf);
 2920 }
 2921 
 2922 #ifdef _LP64
 2923 #define XSTATE_BV 0x200
 2924 #endif

 4039 }
 4040 
 4041 // C++ bool manipulation
 4042 void MacroAssembler::testbool(Register dst) {
 4043   if(sizeof(bool) == 1)
 4044     testb(dst, 0xff);
 4045   else if(sizeof(bool) == 2) {
 4046     // testw implementation needed for two byte bools
 4047     ShouldNotReachHere();
 4048   } else if(sizeof(bool) == 4)
 4049     testl(dst, dst);
 4050   else
 4051     // unsupported
 4052     ShouldNotReachHere();
 4053 }
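
        // Usage sketch (the label is hypothetical): testbool only sets flags
        // from a C++ bool held in a register, so callers pair it with a branch:
        //   testbool(rax);
        //   jcc(Assembler::notZero, L_bool_was_true);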
 4054 
 4055 void MacroAssembler::testptr(Register dst, Register src) {
 4056   LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
 4057 }
 4058 
 4059 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
 4060 void MacroAssembler::tlab_allocate(Register thread, Register obj,
 4061                                    Register var_size_in_bytes,
 4062                                    int con_size_in_bytes,
 4063                                    Register t1,
 4064                                    Register t2,
 4065                                    Label& slow_case) {
 4066   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 4067   bs->tlab_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
 4068 }
 4069 
 4070 RegSet MacroAssembler::call_clobbered_gp_registers() {
 4071   RegSet regs;
 4072 #ifdef _LP64
 4073   regs += RegSet::of(rax, rcx, rdx);
 4074 #ifndef WINDOWS
 4075   regs += RegSet::of(rsi, rdi);
 4076 #endif
 4077   regs += RegSet::range(r8, r11);
 4078 #else

 4292     // clear topmost word (no jump would be needed if conditional assignment worked here)
 4293     movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp);
 4294     // index could be 0 now, must check again
 4295     jcc(Assembler::zero, done);
 4296     bind(even);
 4297   }
 4298 #endif // !_LP64
 4299   // initialize remaining object fields: index is a multiple of 2 now
 4300   {
 4301     Label loop;
 4302     bind(loop);
 4303     movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
 4304     NOT_LP64(movptr(Address(address, index, Address::times_8, offset_in_bytes - 2*BytesPerWord), temp);)
 4305     decrement(index);
 4306     jcc(Assembler::notZero, loop);
 4307   }
 4308 
 4309   bind(done);
 4310 }
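
        // Worked example for the loop above (64-bit, values illustrative):
        // with index == 2 the first iteration stores temp (zero) at
        // [address + offset_in_bytes + 8] and the second at
        // [address + offset_in_bytes + 0], i.e. index*8 bytes are cleared
        // downward from the end; on 32-bit each iteration issues two
        // word-sized stores to cover the same 8 bytes.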
 4311 
 4312 // Look up the method for a megamorphic invokeinterface call.
 4313 // The target method is determined by <intf_klass, itable_index>.
 4314 // The receiver klass is in recv_klass.
 4315 // On success, the result will be in method_result, and execution falls through.
 4316 // On failure, execution transfers to the given label.
 4317 void MacroAssembler::lookup_interface_method(Register recv_klass,
 4318                                              Register intf_klass,
 4319                                              RegisterOrConstant itable_index,
 4320                                              Register method_result,
 4321                                              Register scan_temp,
 4322                                              Label& L_no_such_interface,
 4323                                              bool return_method) {
 4324   assert_different_registers(recv_klass, intf_klass, scan_temp);
 4325   assert_different_registers(method_result, intf_klass, scan_temp);
 4326   assert(recv_klass != method_result || !return_method,
 4327          "recv_klass can be destroyed when method isn't needed");
 4328 
 4329   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
 4330          "caller must use same register for non-constant itable index as for method");
 4331 

 4759   } else {
 4760     Label L;
 4761     jccb(negate_condition(cc), L);
 4762     movl(dst, src);
 4763     bind(L);
 4764   }
 4765 }
 4766 
 4767 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
 4768   if (VM_Version::supports_cmov()) {
 4769     cmovl(cc, dst, src);
 4770   } else {
 4771     Label L;
 4772     jccb(negate_condition(cc), L);
 4773     movl(dst, src);
 4774     bind(L);
 4775   }
 4776 }
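
        // The no-cmov fallback above is the classic pattern: jump over the
        // move using the negated condition, so "dst = src if cc" becomes
        // "skip the movl unless cc holds". Sketch (condition illustrative):
        //   cmov32(Assembler::equal, rax, rbx);  // rax = rbx iff ZF is set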
 4777 
 4778 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
 4779   if (!VerifyOops) return;
 4780 
 4781   BLOCK_COMMENT("verify_oop {");
 4782 #ifdef _LP64
 4783   push(rscratch1);
 4784 #endif
 4785   push(rax);                          // save rax
 4786   push(reg);                          // pass register argument
 4787 
 4788   // Pass register number to verify_oop_subroutine
 4789   const char* b = nullptr;
 4790   {
 4791     ResourceMark rm;
 4792     stringStream ss;
 4793     ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
 4794     b = code_string(ss.as_string());
 4795   }
 4796   ExternalAddress buffer((address) b);
 4797   pushptr(buffer.addr(), rscratch1);
 4798 
 4799   // call indirectly to solve generation ordering problem

 4821   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
 4822   int stackElementSize = Interpreter::stackElementSize;
 4823   int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
 4824 #ifdef ASSERT
 4825   int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
 4826   assert(offset1 - offset == stackElementSize, "correct arithmetic");
 4827 #endif
 4828   Register             scale_reg    = noreg;
 4829   Address::ScaleFactor scale_factor = Address::no_scale;
 4830   if (arg_slot.is_constant()) {
 4831     offset += arg_slot.as_constant() * stackElementSize;
 4832   } else {
 4833     scale_reg    = arg_slot.as_register();
 4834     scale_factor = Address::times(stackElementSize);
 4835   }
 4836   offset += wordSize;           // return PC is on stack
 4837   return Address(rsp, scale_reg, scale_factor, offset);
 4838 }
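
        // Worked example (values illustrative, taking
        // Interpreter::expr_offset_in_bytes(0) as 0): with stackElementSize == 8
        // and a constant arg_slot of 2, the returned address is
        // [rsp + 2*8 + 8], i.e. the requested expression-stack slot plus
        // wordSize to step over the return PC that the call pushed.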
 4839 
 4840 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
 4841   if (!VerifyOops) return;
 4842 
 4843 #ifdef _LP64
 4844   push(rscratch1);
 4845 #endif
 4846   push(rax); // save rax,
 4847   // addr may contain rsp so we will have to adjust it based on the push
 4848   // we just did (and on 64 bit we do two pushes)
  4849   // NOTE: the 64-bit code used to have a bug here: it did movq(addr, rax),
  4850   //       i.e. it stored rax into addr, the reverse of what was intended.
 4851   if (addr.uses(rsp)) {
 4852     lea(rax, addr);
 4853     pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
 4854   } else {
 4855     pushptr(addr);
 4856   }
 4857 
 4858   // Pass register number to verify_oop_subroutine
 4859   const char* b = nullptr;
 4860   {
 4861     ResourceMark rm;

 5308 
 5309 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
 5310   // get mirror
 5311   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 5312   load_method_holder(mirror, method);
 5313   movptr(mirror, Address(mirror, mirror_offset));
 5314   resolve_oop_handle(mirror, tmp);
 5315 }
 5316 
 5317 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
 5318   load_method_holder(rresult, rmethod);
 5319   movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
 5320 }
 5321 
 5322 void MacroAssembler::load_method_holder(Register holder, Register method) {
 5323   movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
 5324   movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
 5325   movptr(holder, Address(holder, ConstantPool::pool_holder_offset()));          // InstanceKlass*
 5326 }
 5327 
 5328 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
 5329   assert_different_registers(src, tmp);
 5330   assert_different_registers(dst, tmp);
 5331 #ifdef _LP64
 5332   if (UseCompressedClassPointers) {
 5333     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5334     decode_klass_not_null(dst, tmp);
 5335   } else
 5336 #endif
 5337     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5338 }
 5339 
 5340 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
 5341   assert_different_registers(src, tmp);
 5342   assert_different_registers(dst, tmp);
 5343 #ifdef _LP64
 5344   if (UseCompressedClassPointers) {
 5345     encode_klass_not_null(src, tmp);
 5346     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5347   } else
 5348 #endif
 5349     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5350 }
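
        // With compressed class pointers (64-bit) the klass field is a 32-bit
        // narrowKlass, hence the movl + decode pair in load_klass and the
        // encode + movl pair in store_klass; otherwise both sides use a full
        // movptr. Sketch of a typical pairing (registers illustrative; note
        // that store_klass clobbers src when compression is on):
        //   load_klass(rdx, rax, rscratch1);    // rdx = rax->klass()
        //   store_klass(rbx, rdx, rscratch1);   // install rdx as rbx's klass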
 5351 
 5352 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
 5353                                     Register tmp1, Register thread_tmp) {
 5354   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5355   decorators = AccessInternal::decorator_fixup(decorators, type);
 5356   bool as_raw = (decorators & AS_RAW) != 0;
 5357   if (as_raw) {
 5358     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
 5359   } else {
 5360     bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
 5361   }
 5362 }
 5363 
 5364 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
 5365                                      Register tmp1, Register tmp2, Register tmp3) {
 5366   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5367   decorators = AccessInternal::decorator_fixup(decorators, type);
 5368   bool as_raw = (decorators & AS_RAW) != 0;
 5369   if (as_raw) {
 5370     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5371   } else {
 5372     bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5373   }
 5374 }
 5375 
 5376 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
 5377                                    Register thread_tmp, DecoratorSet decorators) {
 5378   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
 5379 }
 5380 
 5381 // Doesn't do verification, generates fixed size code
 5382 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
 5383                                             Register thread_tmp, DecoratorSet decorators) {
 5384   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
 5385 }
 5386 
 5387 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
 5388                                     Register tmp2, Register tmp3, DecoratorSet decorators) {
 5389   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
 5390 }
 5391 
 5392 // Used for storing nulls.
 5393 void MacroAssembler::store_heap_oop_null(Address dst) {
 5394   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
 5395 }
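
        // Usage sketch: these wrappers only add the IN_HEAP (and IS_NOT_NULL)
        // decorators before dispatching to the GC's BarrierSetAssembler, so a
        // caller gets the right load/store barriers for the current GC:
        //   load_heap_oop(rax, Address(rsi, 16));   // barrier-aware oop load
        //   store_heap_oop_null(Address(rsi, 16));  // barrier-aware null store
        // (the Address operands here are illustrative)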

 5695 
 5696 void MacroAssembler::reinit_heapbase() {
 5697   if (UseCompressedOops) {
 5698     if (Universe::heap() != nullptr) {
 5699       if (CompressedOops::base() == nullptr) {
 5700         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
 5701       } else {
 5702         mov64(r12_heapbase, (int64_t)CompressedOops::ptrs_base());
 5703       }
 5704     } else {
 5705       movptr(r12_heapbase, ExternalAddress(CompressedOops::ptrs_base_addr()));
 5706     }
 5707   }
 5708 }
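
        // r12_heapbase caches the compressed-oops decoding base. The three
        // cases above: a zero base (xorptr), a known constant base once the
        // heap exists (mov64), or a load from CompressedOops::ptrs_base_addr()
        // while the heap is still being set up. Decoding is then, in sketch
        // form (shift illustrative): oop = r12_heapbase + (narrow_oop << 3).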
 5709 
 5710 #endif // _LP64
 5711 
 5712 #if COMPILER2_OR_JVMCI
 5713 
 5714 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers
 5715 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 5716   // cnt - number of qwords (8-byte words).
 5717   // base - start address, qword aligned.
 5718   Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
 5719   bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
 5720   if (use64byteVector) {
 5721     vpxor(xtmp, xtmp, xtmp, AVX_512bit);
 5722   } else if (MaxVectorSize >= 32) {
 5723     vpxor(xtmp, xtmp, xtmp, AVX_256bit);
 5724   } else {
 5725     pxor(xtmp, xtmp);
 5726   }
 5727   jmp(L_zero_64_bytes);
 5728 
 5729   BIND(L_loop);
 5730   if (MaxVectorSize >= 32) {
 5731     fill64(base, 0, xtmp, use64byteVector);
 5732   } else {
 5733     movdqu(Address(base,  0), xtmp);
 5734     movdqu(Address(base, 16), xtmp);
 5735     movdqu(Address(base, 32), xtmp);
 5736     movdqu(Address(base, 48), xtmp);
 5737   }
 5738   addptr(base, 64);
 5739 
 5740   BIND(L_zero_64_bytes);
 5741   subptr(cnt, 8);
 5742   jccb(Assembler::greaterEqual, L_loop);
 5743 
 5744   // Copy trailing 64 bytes
 5745   if (use64byteVector) {
 5746     addptr(cnt, 8);
 5747     jccb(Assembler::equal, L_end);
 5748     fill64_masked(3, base, 0, xtmp, mask, cnt, rtmp, true);
 5749     jmp(L_end);
 5750   } else {
 5751     addptr(cnt, 4);
 5752     jccb(Assembler::less, L_tail);
 5753     if (MaxVectorSize >= 32) {
 5754       vmovdqu(Address(base, 0), xtmp);
 5755     } else {
 5756       movdqu(Address(base,  0), xtmp);
 5757       movdqu(Address(base, 16), xtmp);
 5758     }
 5759   }
 5760   addptr(base, 32);
 5761   subptr(cnt, 4);
 5762 
 5763   BIND(L_tail);
 5764   addptr(cnt, 4);
 5765   jccb(Assembler::lessEqual, L_end);
 5766   if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
 5767     fill32_masked(3, base, 0, xtmp, mask, cnt, rtmp);
 5768   } else {
 5769     decrement(cnt);
 5770 
 5771     BIND(L_sloop);
 5772     movq(Address(base, 0), xtmp);
 5773     addptr(base, 8);
 5774     decrement(cnt);
 5775     jccb(Assembler::greaterEqual, L_sloop);
 5776   }
 5777   BIND(L_end);
 5778 }
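
        // Shape of the routine above (cnt counted in qwords): the main loop
        // clears 64 bytes (8 qwords) per iteration, then the tail handles the
        // remaining < 8 qwords with a masked store (AVX-512) or with 32-, 16-
        // and 8-byte stores. Worked example: cnt == 21 clears 2 x 64 bytes in
        // the loop, then 4 qwords via one 32-byte store and the final qword
        // in the L_sloop loop.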
 5779 
 5780 // Clearing constant sized memory using YMM/ZMM registers.
 5781 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 5782   assert(UseAVX > 2 && VM_Version::supports_avx512vlbw(), "");
 5783   bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
 5784 
 5785   int vector64_count = (cnt & (~0x7)) >> 3;
 5786   cnt = cnt & 0x7;
 5787   const int fill64_per_loop = 4;
 5788   const int max_unrolled_fill64 = 8;
 5789 
 5790   // 64 byte initialization loop.
 5791   vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit);
 5792   int start64 = 0;
 5793   if (vector64_count > max_unrolled_fill64) {
 5794     Label LOOP;
 5795     Register index = rtmp;
 5796 
 5797     start64 = vector64_count - (vector64_count % fill64_per_loop);
 5798 
 5799     movl(index, 0);

 5849         break;
 5850       case 7:
 5851         if (use64byteVector) {
 5852           movl(rtmp, 0x7F);
 5853           kmovwl(mask, rtmp);
 5854           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 5855         } else {
 5856           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 5857           movl(rtmp, 0x7);
 5858           kmovwl(mask, rtmp);
 5859           evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
 5860         }
 5861         break;
 5862       default:
 5863         fatal("Unexpected length : %d\n",cnt);
 5864         break;
 5865     }
 5866   }
 5867 }
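
        // Worked example for this constant-size path: cnt == 23 qwords gives
        // vector64_count == 2 (two full 64-byte stores) and a tail of
        // cnt & 0x7 == 7 qwords, which lands in "case 7" above: one masked
        // 64-byte store (mask 0x7F selects 7 qwords) with 512-bit vectors, or
        // a 32-byte store plus a masked 24-byte store (mask 0x7) otherwise.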
 5868 
 5869 void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp, XMMRegister xtmp,
 5870                                bool is_large, KRegister mask) {
 5871   // cnt      - number of qwords (8-byte words).
 5872   // base     - start address, qword aligned.
  5873   // is_large - set when the optimizer knows cnt is larger than InitArrayShortSize
 5874   assert(base==rdi, "base register must be edi for rep stos");
 5875   assert(tmp==rax,   "tmp register must be eax for rep stos");
 5876   assert(cnt==rcx,   "cnt register must be ecx for rep stos");
 5877   assert(InitArrayShortSize % BytesPerLong == 0,
 5878     "InitArrayShortSize should be the multiple of BytesPerLong");
 5879 
 5880   Label DONE;
 5881   if (!is_large || !UseXMMForObjInit) {
 5882     xorptr(tmp, tmp);
 5883   }
 5884 
 5885   if (!is_large) {
 5886     Label LOOP, LONG;
 5887     cmpptr(cnt, InitArrayShortSize/BytesPerLong);
 5888     jccb(Assembler::greater, LONG);
 5889 
 5890     NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
 5891 
 5892     decrement(cnt);
 5893     jccb(Assembler::negative, DONE); // Zero length
 5894 
 5895     // Use individual pointer-sized stores for small counts:
 5896     BIND(LOOP);
 5897     movptr(Address(base, cnt, Address::times_ptr), tmp);
 5898     decrement(cnt);
 5899     jccb(Assembler::greaterEqual, LOOP);
 5900     jmpb(DONE);
 5901 
 5902     BIND(LONG);
 5903   }
 5904 
 5905   // Use longer rep-prefixed ops for non-small counts:
 5906   if (UseFastStosb) {
 5907     shlptr(cnt, 3); // convert to number of bytes
 5908     rep_stosb();
 5909   } else if (UseXMMForObjInit) {
 5910     xmm_clear_mem(base, cnt, tmp, xtmp, mask);
 5911   } else {
 5912     NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
 5913     rep_stos();
 5914   }
 5915 
 5916   BIND(DONE);
 5917 }
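
        // For counts above InitArrayShortSize the code switches to rep-string
        // stores: rep_stosb takes its length in bytes, hence shlptr(cnt, 3)
        // (qwords * 8), while rep_stos stores word-sized chunks, so the 32-bit
        // VM first doubles cnt to count 4-byte words. E.g. cnt == 64 qwords
        // becomes 512 bytes for rep_stosb (example values, of course).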
 5918 
 5919 #endif //COMPILER2_OR_JVMCI
 5920 
 5921 
 5922 void MacroAssembler::generate_fill(BasicType t, bool aligned,
 5923                                    Register to, Register value, Register count,
 5924                                    Register rtmp, XMMRegister xtmp) {
 5925   ShortBranchVerifier sbv(this);
 5926   assert_different_registers(to, value, count, rtmp);
 5927   Label L_exit;
 5928   Label L_fill_2_bytes, L_fill_4_bytes;
 5929 
 5930 #if defined(COMPILER2) && defined(_LP64)

 9929 
 9930   // Load top.
 9931   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
 9932 
 9933   // Check if the lock-stack is full.
 9934   cmpl(top, LockStack::end_offset());
 9935   jcc(Assembler::greaterEqual, slow);
 9936 
 9937   // Check for recursion.
 9938   cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
 9939   jcc(Assembler::equal, push);
 9940 
 9941   // Check header for monitor (0b10).
 9942   testptr(reg_rax, markWord::monitor_value);
 9943   jcc(Assembler::notZero, slow);
 9944 
 9945   // Try to lock. Transition lock bits 0b01 => 0b00
 9946   movptr(tmp, reg_rax);
 9947   andptr(tmp, ~(int32_t)markWord::unlocked_value);
 9948   orptr(reg_rax, markWord::unlocked_value);
 9949   lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
 9950   jcc(Assembler::notEqual, slow);
 9951 
  9952   // Restore top; the CAS above clobbered the register.
 9953   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
 9954 
 9955   bind(push);
 9956   // After successful lock, push object on lock-stack.
 9957   movptr(Address(thread, top), obj);
 9958   incrementl(top, oopSize);
 9959   movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
 9960 }
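
        // Mark word sketch for the CAS above: an unlocked header ends in the
        // 0b01 unlocked_value pattern. The code builds tmp = header & ~0b01
        // (the fast-locked 0b00 form) and rax = header | 0b01, so the cmpxchg
        // succeeds only if the mark word still reads as unlocked, and then
        // installs the locked pattern atomically.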
 9961 
 9962 // Implements lightweight-unlocking.
 9963 //
 9964 // obj: the object to be unlocked
 9965 // reg_rax: rax
 9966 // thread: the thread
 9967 // tmp: a temporary register
 9968 //

src/hotspot/cpu/x86/macroAssembler_x86.cpp (new version)

   11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   12  * version 2 for more details (a copy is included in the LICENSE file that
   13  * accompanied this code).
   14  *
   15  * You should have received a copy of the GNU General Public License version
   16  * 2 along with this work; if not, write to the Free Software Foundation,
   17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   18  *
   19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   20  * or visit www.oracle.com if you need additional information or have any
   21  * questions.
   22  *
   23  */
   24 
   25 #include "precompiled.hpp"
   26 #include "asm/assembler.hpp"
   27 #include "asm/assembler.inline.hpp"
   28 #include "code/compiledIC.hpp"
   29 #include "compiler/compiler_globals.hpp"
   30 #include "compiler/disassembler.hpp"
   31 #include "ci/ciInlineKlass.hpp"
   32 #include "crc32c.h"
   33 #include "gc/shared/barrierSet.hpp"
   34 #include "gc/shared/barrierSetAssembler.hpp"
   35 #include "gc/shared/collectedHeap.inline.hpp"
   36 #include "gc/shared/tlab_globals.hpp"
   37 #include "interpreter/bytecodeHistogram.hpp"
   38 #include "interpreter/interpreter.hpp"
   39 #include "jvm.h"
   40 #include "memory/resourceArea.hpp"
   41 #include "memory/universe.hpp"
   42 #include "oops/accessDecorators.hpp"
   43 #include "oops/compressedKlass.inline.hpp"
   44 #include "oops/compressedOops.inline.hpp"
   45 #include "oops/klass.inline.hpp"
   46 #include "oops/resolvedFieldEntry.hpp"
   47 #include "prims/methodHandles.hpp"
   48 #include "runtime/continuation.hpp"
   49 #include "runtime/interfaceSupport.inline.hpp"
   50 #include "runtime/javaThread.hpp"
   51 #include "runtime/jniHandles.hpp"
   52 #include "runtime/objectMonitor.hpp"
   53 #include "runtime/os.hpp"
   54 #include "runtime/safepoint.hpp"
   55 #include "runtime/safepointMechanism.hpp"
   56 #include "runtime/sharedRuntime.hpp"
   57 #include "runtime/signature_cc.hpp"
   58 #include "runtime/stubRoutines.hpp"
   59 #include "utilities/checkedCast.hpp"
   60 #include "utilities/macros.hpp"
   61 #include "vmreg_x86.inline.hpp"
   62 #ifdef COMPILER2
   63 #include "opto/output.hpp"
   64 #endif
   65 
   66 #ifdef PRODUCT
   67 #define BLOCK_COMMENT(str) /* nothing */
   68 #define STOP(error) stop(error)
   69 #else
   70 #define BLOCK_COMMENT(str) block_comment(str)
   71 #define STOP(error) block_comment(error); stop(error)
   72 #endif
   73 
   74 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
   75 
   76 #ifdef ASSERT
   77 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
   78 #endif
   79 
   80 static const Assembler::Condition reverse[] = {
   81     Assembler::noOverflow     /* overflow      = 0x0 */ ,
   82     Assembler::overflow       /* noOverflow    = 0x1 */ ,
   83     Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
   84     Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,

 1708 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1709   LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
 1710   LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
 1711   pass_arg2(this, arg_2);
 1712   pass_arg1(this, arg_1);
 1713   pass_arg0(this, arg_0);
 1714   call_VM_leaf(entry_point, 3);
 1715 }
 1716 
 1717 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
 1718   LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3));
 1719   LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
 1720   LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
 1721   pass_arg3(this, arg_3);
 1722   pass_arg2(this, arg_2);
 1723   pass_arg1(this, arg_1);
 1724   pass_arg0(this, arg_0);
 1725   call_VM_leaf(entry_point, 3);
 1726 }
 1727 
 1728 void MacroAssembler::super_call_VM_leaf(address entry_point) {
 1729   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1730 }
 1731 
 1732 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
 1733   pass_arg0(this, arg_0);
 1734   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1735 }
 1736 
 1737 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
 1738   LP64_ONLY(assert_different_registers(arg_0, c_rarg1));
 1739   pass_arg1(this, arg_1);
 1740   pass_arg0(this, arg_0);
 1741   MacroAssembler::call_VM_leaf_base(entry_point, 2);
 1742 }
 1743 
 1744 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1745   LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
 1746   LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
 1747   pass_arg2(this, arg_2);
 1748   pass_arg1(this, arg_1);
 1749   pass_arg0(this, arg_0);
 1750   MacroAssembler::call_VM_leaf_base(entry_point, 3);
 1751 }

 2896     lea(rscratch, src);
 2897     Assembler::mulss(dst, Address(rscratch, 0));
 2898   }
 2899 }
 2900 
 2901 void MacroAssembler::null_check(Register reg, int offset) {
 2902   if (needs_explicit_null_check(offset)) {
  2903     // provoke an OS null-pointer exception if reg is null by
  2904     // accessing M[reg] without changing any (non-CC) registers
  2905     // NOTE: a cmpl is sufficient here to provoke the SIGSEGV
 2906     cmpptr(rax, Address(reg, 0));
 2907     // Note: should probably use testl(rax, Address(reg, 0));
 2908     //       may be shorter code (however, this version of
 2909     //       testl needs to be implemented first)
 2910   } else {
 2911     // nothing to do, (later) access of M[reg + offset]
 2912     // will provoke OS null exception if reg is null
 2913   }
 2914 }
 2915 
 2916 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
 2917   andptr(markword, markWord::inline_type_mask_in_place);
 2918   cmpptr(markword, markWord::inline_type_pattern);
 2919   jcc(Assembler::equal, is_inline_type);
 2920 }
 2921 
 2922 void MacroAssembler::test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type) {
 2923   movl(temp_reg, Address(klass, Klass::access_flags_offset()));
 2924   testl(temp_reg, JVM_ACC_IDENTITY);
 2925   jcc(Assembler::zero, is_inline_type);
 2926 }
 2927 
 2928 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type) {
 2929   testptr(object, object);
 2930   jcc(Assembler::zero, not_inline_type);
 2931   const int is_inline_type_mask = markWord::inline_type_pattern;
 2932   movptr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
 2933   andptr(tmp, is_inline_type_mask);
 2934   cmpptr(tmp, is_inline_type_mask);
 2935   jcc(Assembler::notEqual, not_inline_type);
 2936 }
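
        // Pattern being matched in the two helpers above: inline-type (value
        // object) oops carry a dedicated bit pattern in the low mark-word
        // bits, so "mark & inline_type_mask_in_place == inline_type_pattern"
        // identifies one. test_oop_is_not_inline_type additionally treats a
        // null object as "not an inline type" via the initial testptr.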
 2937 
 2938 void MacroAssembler::test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type) {
 2939 #ifdef ASSERT
 2940   {
 2941     Label done_check;
 2942     test_klass_is_inline_type(klass, temp_reg, done_check);
 2943     stop("test_klass_is_empty_inline_type with non inline type klass");
 2944     bind(done_check);
 2945   }
 2946 #endif
 2947   movl(temp_reg, Address(klass, InstanceKlass::misc_flags_offset()));
 2948   testl(temp_reg, InstanceKlassFlags::is_empty_inline_type_value());
 2949   jcc(Assembler::notZero, is_empty_inline_type);
 2950 }
 2951 
 2952 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) {
 2953   movl(temp_reg, flags);
 2954   testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
 2955   jcc(Assembler::notEqual, is_null_free_inline_type);
 2956 }
 2957 
 2958 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) {
 2959   movl(temp_reg, flags);
 2960   testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
 2961   jcc(Assembler::equal, not_null_free_inline_type);
 2962 }
 2963 
 2964 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) {
 2965   movl(temp_reg, flags);
 2966   testl(temp_reg, 1 << ResolvedFieldEntry::is_flat_shift);
 2967   jcc(Assembler::notEqual, is_flat);
 2968 }
 2969 
 2970 void MacroAssembler::test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker) {
 2971   movl(temp_reg, flags);
 2972   testl(temp_reg, 1 << ResolvedFieldEntry::has_null_marker_shift);
 2973   jcc(Assembler::notEqual, has_null_marker);
 2974 }
 2975 
 2976 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
 2977   Label test_mark_word;
 2978   // load mark word
 2979   movptr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
  2980   // if the mark word is unlocked (not displaced), test it directly
  2981   testl(temp_reg, markWord::unlocked_value);
  2982   jccb(Assembler::notZero, test_mark_word);
  2983   // slow path: the mark word is displaced, so use the klass prototype header
 2984   push(rscratch1);
 2985   load_prototype_header(temp_reg, oop, rscratch1);
 2986   pop(rscratch1);
 2987 
 2988   bind(test_mark_word);
 2989   testl(temp_reg, test_bit);
 2990   jcc((jmp_set) ? Assembler::notZero : Assembler::zero, jmp_label);
 2991 }
 2992 
 2993 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg,
 2994                                          Label& is_flat_array) {
 2995 #ifdef _LP64
 2996   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array);
 2997 #else
 2998   load_klass(temp_reg, oop, noreg);
 2999   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 3000   test_flat_array_layout(temp_reg, is_flat_array);
 3001 #endif
 3002 }
 3003 
 3004 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg,
 3005                                              Label& is_non_flat_array) {
 3006 #ifdef _LP64
 3007   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array);
 3008 #else
 3009   load_klass(temp_reg, oop, noreg);
 3010   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 3011   test_non_flat_array_layout(temp_reg, is_non_flat_array);
 3012 #endif
 3013 }
 3014 
 3015 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label&is_null_free_array) {
 3016 #ifdef _LP64
 3017   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
 3018 #else
 3019   Unimplemented();
 3020 #endif
 3021 }
 3022 
 3023 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label&is_non_null_free_array) {
 3024 #ifdef _LP64
 3025   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
 3026 #else
 3027   Unimplemented();
 3028 #endif
 3029 }
 3030 
 3031 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) {
 3032   testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
 3033   jcc(Assembler::notZero, is_flat_array);
 3034 }
 3035 
 3036 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) {
 3037   testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
 3038   jcc(Assembler::zero, is_non_flat_array);
 3039 }
 3040 
 3041 void MacroAssembler::os_breakpoint() {
  3042   // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
 3043   // (e.g., MSVC can't call ps() otherwise)
 3044   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 3045 }
 3046 
 3047 void MacroAssembler::unimplemented(const char* what) {
 3048   const char* buf = nullptr;
 3049   {
 3050     ResourceMark rm;
 3051     stringStream ss;
 3052     ss.print("unimplemented: %s", what);
 3053     buf = code_string(ss.as_string());
 3054   }
 3055   stop(buf);
 3056 }
 3057 
 3058 #ifdef _LP64
 3059 #define XSTATE_BV 0x200
 3060 #endif

 4175 }
 4176 
 4177 // C++ bool manipulation
 4178 void MacroAssembler::testbool(Register dst) {
 4179   if(sizeof(bool) == 1)
 4180     testb(dst, 0xff);
 4181   else if(sizeof(bool) == 2) {
 4182     // testw implementation needed for two byte bools
 4183     ShouldNotReachHere();
 4184   } else if(sizeof(bool) == 4)
 4185     testl(dst, dst);
 4186   else
 4187     // unsupported
 4188     ShouldNotReachHere();
 4189 }
 4190 
 4191 void MacroAssembler::testptr(Register dst, Register src) {
 4192   LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
 4193 }
 4194 
 4195 // Object / value buffer allocation...
 4196 //
 4197 // Kills klass and rsi on LP64
 4198 void MacroAssembler::allocate_instance(Register klass, Register new_obj,
 4199                                        Register t1, Register t2,
 4200                                        bool clear_fields, Label& alloc_failed)
 4201 {
 4202   Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop;
 4203   Register layout_size = t1;
 4204   assert(new_obj == rax, "needs to be rax");
 4205   assert_different_registers(klass, new_obj, t1, t2);
 4206 
 4207   // get instance_size in InstanceKlass (scaled to a count of bytes)
 4208   movl(layout_size, Address(klass, Klass::layout_helper_offset()));
 4209   // test to see if it has a finalizer or is malformed in some way
 4210   testl(layout_size, Klass::_lh_instance_slow_path_bit);
 4211   jcc(Assembler::notZero, slow_case_no_pop);
 4212 
 4213   // Allocate the instance:
 4214   //  If TLAB is enabled:
 4215   //    Try to allocate in the TLAB.
 4216   //    If fails, go to the slow path.
 4217   //  Else If inline contiguous allocations are enabled:
 4218   //    Try to allocate in eden.
 4219   //    If fails due to heap end, go to slow path.
 4220   //
 4221   //  If TLAB is enabled OR inline contiguous is enabled:
 4222   //    Initialize the allocation.
 4223   //    Exit.
 4224   //
 4225   //  Go to slow path.
 4226 
 4227   push(klass);
 4228   const Register thread = LP64_ONLY(r15_thread) NOT_LP64(klass);
 4229 #ifndef _LP64
 4230   if (UseTLAB) {
 4231     get_thread(thread);
 4232   }
 4233 #endif // _LP64
 4234 
 4235   if (UseTLAB) {
 4236     tlab_allocate(thread, new_obj, layout_size, 0, klass, t2, slow_case);
 4237     if (ZeroTLAB || (!clear_fields)) {
 4238       // the fields have been already cleared
 4239       jmp(initialize_header);
 4240     } else {
 4241       // initialize both the header and fields
 4242       jmp(initialize_object);
 4243     }
 4244   } else {
 4245     jmp(slow_case);
 4246   }
 4247 
  4248   // If UseTLAB is true, the object was allocated above and still needs to be
  4249   // initialized; otherwise skip the initialization and go to the slow path.
 4250   if (UseTLAB) {
 4251     if (clear_fields) {
 4252       // The object is initialized before the header.  If the object size is
 4253       // zero, go directly to the header initialization.
 4254       bind(initialize_object);
 4255       decrement(layout_size, sizeof(oopDesc));
 4256       jcc(Assembler::zero, initialize_header);
 4257 
 4258       // Initialize topmost object field, divide size by 8, check if odd and
 4259       // test if zero.
 4260       Register zero = klass;
 4261       xorl(zero, zero);    // use zero reg to clear memory (shorter code)
 4262       shrl(layout_size, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
 4263 
 4264   #ifdef ASSERT
 4265       // make sure instance_size was multiple of 8
 4266       Label L;
 4267       // Ignore partial flag stall after shrl() since it is debug VM
 4268       jcc(Assembler::carryClear, L);
 4269       stop("object size is not multiple of 2 - adjust this code");
 4270       bind(L);
 4271       // must be > 0, no extra check needed here
 4272   #endif
 4273 
 4274       // initialize remaining object fields: instance_size was a multiple of 8
 4275       {
 4276         Label loop;
 4277         bind(loop);
 4278         movptr(Address(new_obj, layout_size, Address::times_8, sizeof(oopDesc) - 1*oopSize), zero);
 4279         NOT_LP64(movptr(Address(new_obj, layout_size, Address::times_8, sizeof(oopDesc) - 2*oopSize), zero));
 4280         decrement(layout_size);
 4281         jcc(Assembler::notZero, loop);
 4282       }
 4283     } // clear_fields
 4284 
 4285     // initialize object header only.
 4286     bind(initialize_header);
 4287     pop(klass);
 4288     Register mark_word = t2;
 4289     movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
 4290     movptr(Address(new_obj, oopDesc::mark_offset_in_bytes ()), mark_word);
 4291 #ifdef _LP64
 4292     xorl(rsi, rsi);                 // use zero reg to clear memory (shorter code)
 4293     store_klass_gap(new_obj, rsi);  // zero klass gap for compressed oops
 4294 #endif
 4295     movptr(t2, klass);         // preserve klass
 4296     store_klass(new_obj, t2, rscratch1);  // src klass reg is potentially compressed
 4297 
 4298     jmp(done);
 4299   }
 4300 
 4301   bind(slow_case);
 4302   pop(klass);
 4303   bind(slow_case_no_pop);
 4304   jmp(alloc_failed);
 4305 
 4306   bind(done);
 4307 }
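
        // Typical call shape (registers and label are illustrative only):
        //   Label slow;
        //   allocate_instance(rdx, rax, rcx, rbx, /*clear_fields*/ true, slow);
        //   // here rax holds a zeroed instance with its header installed
        // On the slow path the caller usually falls back to a runtime call.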
 4308 
 4309 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
 4310 void MacroAssembler::tlab_allocate(Register thread, Register obj,
 4311                                    Register var_size_in_bytes,
 4312                                    int con_size_in_bytes,
 4313                                    Register t1,
 4314                                    Register t2,
 4315                                    Label& slow_case) {
 4316   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 4317   bs->tlab_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
 4318 }
 4319 
 4320 RegSet MacroAssembler::call_clobbered_gp_registers() {
 4321   RegSet regs;
 4322 #ifdef _LP64
 4323   regs += RegSet::of(rax, rcx, rdx);
 4324 #ifndef WINDOWS
 4325   regs += RegSet::of(rsi, rdi);
 4326 #endif
 4327   regs += RegSet::range(r8, r11);
 4328 #else

 4542     // clear topmost word (no jump would be needed if conditional assignment worked here)
 4543     movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp);
 4544     // index could be 0 now, must check again
 4545     jcc(Assembler::zero, done);
 4546     bind(even);
 4547   }
 4548 #endif // !_LP64
 4549   // initialize remaining object fields: index is a multiple of 2 now
 4550   {
 4551     Label loop;
 4552     bind(loop);
 4553     movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
 4554     NOT_LP64(movptr(Address(address, index, Address::times_8, offset_in_bytes - 2*BytesPerWord), temp);)
 4555     decrement(index);
 4556     jcc(Assembler::notZero, loop);
 4557   }
 4558 
 4559   bind(done);
 4560 }
 4561 
 4562 void MacroAssembler::get_inline_type_field_klass(Register klass, Register index, Register inline_klass) {
 4563   movptr(inline_klass, Address(klass, InstanceKlass::inline_type_field_klasses_offset()));
 4564 #ifdef ASSERT
 4565   {
 4566     Label done;
 4567     cmpptr(inline_klass, 0);
 4568     jcc(Assembler::notEqual, done);
 4569     stop("get_inline_type_field_klass contains no inline klass");
 4570     bind(done);
 4571   }
 4572 #endif
 4573   movptr(inline_klass, Address(inline_klass, index, Address::times_ptr, Array<InlineKlass*>::base_offset_in_bytes()));
 4574 }
 4575 
 4576 void MacroAssembler::get_default_value_oop(Register inline_klass, Register temp_reg, Register obj) {
 4577 #ifdef ASSERT
 4578   {
 4579     Label done_check;
 4580     test_klass_is_inline_type(inline_klass, temp_reg, done_check);
 4581     stop("get_default_value_oop from non inline type klass");
 4582     bind(done_check);
 4583   }
 4584 #endif
 4585   Register offset = temp_reg;
 4586   // Getting the offset of the pre-allocated default value
 4587   movptr(offset, Address(inline_klass, in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset())));
 4588   movl(offset, Address(offset, in_bytes(InlineKlass::default_value_offset_offset())));
 4589 
 4590   // Getting the mirror
 4591   movptr(obj, Address(inline_klass, in_bytes(Klass::java_mirror_offset())));
 4592   resolve_oop_handle(obj, inline_klass);
 4593 
 4594   // Getting the pre-allocated default value from the mirror
 4595   Address field(obj, offset, Address::times_1);
 4596   load_heap_oop(obj, field);
 4597 }
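
        // Chain walked above: the InlineKlass records the byte offset of its
        // pre-allocated default value inside the klass's java mirror, so in
        // sketch form: obj = oop-field-at(mirror + default_value_offset),
        // with the final load going through load_heap_oop so GC barriers apply.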
 4598 
 4599 void MacroAssembler::get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj) {
 4600 #ifdef ASSERT
 4601   {
 4602     Label done_check;
 4603     test_klass_is_empty_inline_type(inline_klass, temp_reg, done_check);
 4604     stop("get_empty_value from non-empty inline klass");
 4605     bind(done_check);
 4606   }
 4607 #endif
 4608   get_default_value_oop(inline_klass, temp_reg, obj);
 4609 }
 4610 
 4611 
 4612 // Look up the method for a megamorphic invokeinterface call.
 4613 // The target method is determined by <intf_klass, itable_index>.
 4614 // The receiver klass is in recv_klass.
 4615 // On success, the result will be in method_result, and execution falls through.
 4616 // On failure, execution transfers to the given label.
 4617 void MacroAssembler::lookup_interface_method(Register recv_klass,
 4618                                              Register intf_klass,
 4619                                              RegisterOrConstant itable_index,
 4620                                              Register method_result,
 4621                                              Register scan_temp,
 4622                                              Label& L_no_such_interface,
 4623                                              bool return_method) {
 4624   assert_different_registers(recv_klass, intf_klass, scan_temp);
 4625   assert_different_registers(method_result, intf_klass, scan_temp);
 4626   assert(recv_klass != method_result || !return_method,
 4627          "recv_klass can be destroyed when method isn't needed");
 4628 
 4629   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
 4630          "caller must use same register for non-constant itable index as for method");
 4631 

 5059   } else {
 5060     Label L;
 5061     jccb(negate_condition(cc), L);
 5062     movl(dst, src);
 5063     bind(L);
 5064   }
 5065 }
 5066 
 5067 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
 5068   if (VM_Version::supports_cmov()) {
 5069     cmovl(cc, dst, src);
 5070   } else {
 5071     Label L;
 5072     jccb(negate_condition(cc), L);
 5073     movl(dst, src);
 5074     bind(L);
 5075   }
 5076 }
 5077 
 5078 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
 5079   if (!VerifyOops || VerifyAdapterSharing) {
  5080     // The address of the code string below confuses VerifyAdapterSharing
 5081     // because it may differ between otherwise equivalent adapters.
 5082     return;
 5083   }
 5084 
 5085   BLOCK_COMMENT("verify_oop {");
 5086 #ifdef _LP64
 5087   push(rscratch1);
 5088 #endif
 5089   push(rax);                          // save rax
 5090   push(reg);                          // pass register argument
 5091 
 5092   // Pass register number to verify_oop_subroutine
 5093   const char* b = nullptr;
 5094   {
 5095     ResourceMark rm;
 5096     stringStream ss;
 5097     ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
 5098     b = code_string(ss.as_string());
 5099   }
 5100   ExternalAddress buffer((address) b);
 5101   pushptr(buffer.addr(), rscratch1);
 5102 
 5103   // call indirectly to solve generation ordering problem

 5125   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
 5126   int stackElementSize = Interpreter::stackElementSize;
 5127   int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
 5128 #ifdef ASSERT
 5129   int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
 5130   assert(offset1 - offset == stackElementSize, "correct arithmetic");
 5131 #endif
 5132   Register             scale_reg    = noreg;
 5133   Address::ScaleFactor scale_factor = Address::no_scale;
 5134   if (arg_slot.is_constant()) {
 5135     offset += arg_slot.as_constant() * stackElementSize;
 5136   } else {
 5137     scale_reg    = arg_slot.as_register();
 5138     scale_factor = Address::times(stackElementSize);
 5139   }
 5140   offset += wordSize;           // return PC is on stack
 5141   return Address(rsp, scale_reg, scale_factor, offset);
 5142 }
 5143 
 5144 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
 5145   if (!VerifyOops || VerifyAdapterSharing) {
  5146     // The address of the code string below confuses VerifyAdapterSharing
 5147     // because it may differ between otherwise equivalent adapters.
 5148     return;
 5149   }
 5150 
 5151 #ifdef _LP64
 5152   push(rscratch1);
 5153 #endif
 5154   push(rax); // save rax,
 5155   // addr may contain rsp so we will have to adjust it based on the push
 5156   // we just did (and on 64 bit we do two pushes)
  5157   // NOTE: the 64-bit code used to have a bug here: it did movq(addr, rax),
  5158   //       i.e. it stored rax into addr, the reverse of what was intended.
 5159   if (addr.uses(rsp)) {
 5160     lea(rax, addr);
 5161     pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
 5162   } else {
 5163     pushptr(addr);
 5164   }
 5165 
 5166   // Pass register number to verify_oop_subroutine
 5167   const char* b = nullptr;
 5168   {
 5169     ResourceMark rm;

 5616 
 5617 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
 5618   // get mirror
 5619   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 5620   load_method_holder(mirror, method);
 5621   movptr(mirror, Address(mirror, mirror_offset));
 5622   resolve_oop_handle(mirror, tmp);
 5623 }
 5624 
 5625 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
 5626   load_method_holder(rresult, rmethod);
 5627   movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
 5628 }
 5629 
 5630 void MacroAssembler::load_method_holder(Register holder, Register method) {
 5631   movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
 5632   movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
 5633   movptr(holder, Address(holder, ConstantPool::pool_holder_offset()));          // InstanceKlass*
 5634 }
 5635 
 5636 void MacroAssembler::load_metadata(Register dst, Register src) {
 5637   if (UseCompressedClassPointers) {
 5638     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5639   } else {
 5640     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5641   }
 5642 }
 5643 
 5644 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
 5645   assert_different_registers(src, tmp);
 5646   assert_different_registers(dst, tmp);
 5647 #ifdef _LP64
 5648   if (UseCompressedClassPointers) {
 5649     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5650     decode_klass_not_null(dst, tmp);
 5651   } else
 5652 #endif
 5653   movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5654 }
 5655 
 5656 void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) {
 5657   load_klass(dst, src, tmp);
 5658   movptr(dst, Address(dst, Klass::prototype_header_offset()));
 5659 }
 5660 
 5661 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
 5662   assert_different_registers(src, tmp);
 5663   assert_different_registers(dst, tmp);
 5664 #ifdef _LP64
 5665   if (UseCompressedClassPointers) {
 5666     encode_klass_not_null(src, tmp);
 5667     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5668   } else
 5669 #endif
 5670     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5671 }
 5672 
 5673 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
 5674                                     Register tmp1, Register thread_tmp) {
 5675   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5676   decorators = AccessInternal::decorator_fixup(decorators, type);
 5677   bool as_raw = (decorators & AS_RAW) != 0;
 5678   if (as_raw) {
 5679     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
 5680   } else {
 5681     bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
 5682   }
 5683 }
 5684 
 5685 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
 5686                                      Register tmp1, Register tmp2, Register tmp3) {
 5687   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5688   decorators = AccessInternal::decorator_fixup(decorators, type);
 5689   bool as_raw = (decorators & AS_RAW) != 0;
 5690   if (as_raw) {
 5691     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5692   } else {
 5693     bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5694   }
 5695 }
 5696 
 5697 void MacroAssembler::access_value_copy(DecoratorSet decorators, Register src, Register dst,
 5698                                        Register inline_klass) {
 5699   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5700   bs->value_copy(this, decorators, src, dst, inline_klass);
 5701 }
 5702 
 5703 void MacroAssembler::first_field_offset(Register inline_klass, Register offset) {
 5704   movptr(offset, Address(inline_klass, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 5705   movl(offset, Address(offset, InlineKlass::first_field_offset_offset()));
 5706 }
 5707 
 5708 void MacroAssembler::data_for_oop(Register oop, Register data, Register inline_klass) {
 5709   // ((address) (void*) o) + vk->first_field_offset();
 5710   Register offset = (data == oop) ? rscratch1 : data;
 5711   first_field_offset(inline_klass, offset);
 5712   if (data == oop) {
 5713     addptr(data, offset);
 5714   } else {
 5715     lea(data, Address(oop, offset));
 5716   }
 5717 }
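
        // Address arithmetic above: the payload of a value object starts at
        // first_field_offset() past the header, so data = oop + offset. When
        // data aliases oop the offset is staged in rscratch1 and added in
        // place; otherwise a single lea forms the sum without clobbering oop.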
 5718 
 5719 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass,
 5720                                                 Register index, Register data) {
 5721   assert(index != rcx, "index needs to shift by rcx");
 5722   assert_different_registers(array, array_klass, index);
 5723   assert_different_registers(rcx, array, index);
 5724 
 5725   // array->base() + (index << Klass::layout_helper_log2_element_size(lh));
 5726   movl(rcx, Address(array_klass, Klass::layout_helper_offset()));
 5727 
 5728   // Klass::layout_helper_log2_element_size(lh)
 5729   // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
 5730   shrl(rcx, Klass::_lh_log2_element_size_shift);
 5731   andl(rcx, Klass::_lh_log2_element_size_mask);
 5732   shlptr(index); // index << rcx
 5733 
 5734   lea(data, Address(array, index, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT)));
 5735 }
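// Load a (possibly compressed) heap oop from src into dst through the GC
// barrier machinery above; decorators refine the access.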
 5736 
 5737 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
 5738                                    Register thread_tmp, DecoratorSet decorators) {
 5739   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
 5740 }
 5741 
 5742 // Does no verification; generates fixed-size code.
 5743 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
 5744                                             Register thread_tmp, DecoratorSet decorators) {
 5745   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
 5746 }
 5747 
 5748 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
 5749                                     Register tmp2, Register tmp3, DecoratorSet decorators) {
 5750   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
 5751 }
 5752 
 5753 // Used for storing nulls.
 5754 void MacroAssembler::store_heap_oop_null(Address dst) {
 5755   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
 5756 }

 6056 
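// Reload r12_heapbase with the compressed-oops base: a null base (zero-based
// mode) is materialized with a self-xor; before the heap exists, the base is
// loaded indirectly since its final value is not yet known.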
 6057 void MacroAssembler::reinit_heapbase() {
 6058   if (UseCompressedOops) {
 6059     if (Universe::heap() != nullptr) {
 6060       if (CompressedOops::base() == nullptr) {
 6061         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
 6062       } else {
 6063         mov64(r12_heapbase, (int64_t)CompressedOops::ptrs_base());
 6064       }
 6065     } else {
 6066       movptr(r12_heapbase, ExternalAddress(CompressedOops::ptrs_base_addr()));
 6067     }
 6068   }
 6069 }
 6070 
 6071 #endif // _LP64
 6072 
 6073 #if COMPILER2_OR_JVMCI
 6074 
 6075 // Clear 'cnt' qwords of memory starting at 'base', using XMM/YMM/ZMM registers.
 6076 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, KRegister mask) {
 6077   // cnt - number of qwords (8-byte words).
 6078   // base - start address, qword aligned.
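  // Strategy: broadcast 'val' into xtmp, store 64 bytes per loop iteration,
  // then mop up the 32/16/8-byte remainder (using masked AVX-512 stores when
  // 64-byte vectors are enabled).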
 6079   Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
 6080   bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
 6081   if (use64byteVector) {
 6082     evpbroadcastq(xtmp, val, AVX_512bit);
 6083   } else if (MaxVectorSize >= 32) {
 6084     movdq(xtmp, val);
 6085     punpcklqdq(xtmp, xtmp);
 6086     vinserti128_high(xtmp, xtmp);
 6087   } else {
 6088     movdq(xtmp, val);
 6089     punpcklqdq(xtmp, xtmp);
 6090   }
 6091   jmp(L_zero_64_bytes);
 6092 
 6093   BIND(L_loop);
 6094   if (MaxVectorSize >= 32) {
 6095     fill64(base, 0, xtmp, use64byteVector);
 6096   } else {
 6097     movdqu(Address(base,  0), xtmp);
 6098     movdqu(Address(base, 16), xtmp);
 6099     movdqu(Address(base, 32), xtmp);
 6100     movdqu(Address(base, 48), xtmp);
 6101   }
 6102   addptr(base, 64);
 6103 
 6104   BIND(L_zero_64_bytes);
 6105   subptr(cnt, 8);
 6106   jccb(Assembler::greaterEqual, L_loop);
 6107 
 6108   // Copy trailing 64 bytes
 6109   if (use64byteVector) {
 6110     addptr(cnt, 8);
 6111     jccb(Assembler::equal, L_end);
 6112     fill64_masked(3, base, 0, xtmp, mask, cnt, val, true);
 6113     jmp(L_end);
 6114   } else {
 6115     addptr(cnt, 4);
 6116     jccb(Assembler::less, L_tail);
 6117     if (MaxVectorSize >= 32) {
 6118       vmovdqu(Address(base, 0), xtmp);
 6119     } else {
 6120       movdqu(Address(base,  0), xtmp);
 6121       movdqu(Address(base, 16), xtmp);
 6122     }
 6123   }
 6124   addptr(base, 32);
 6125   subptr(cnt, 4);
 6126 
 6127   BIND(L_tail);
 6128   addptr(cnt, 4);
 6129   jccb(Assembler::lessEqual, L_end);
 6130   if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
 6131     fill32_masked(3, base, 0, xtmp, mask, cnt, val);
 6132   } else {
 6133     decrement(cnt);
 6134 
 6135     BIND(L_sloop);
 6136     movq(Address(base, 0), xtmp);
 6137     addptr(base, 8);
 6138     decrement(cnt);
 6139     jccb(Assembler::greaterEqual, L_sloop);
 6140   }
 6141   BIND(L_end);
 6142 }
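// Called on return from a call that may return an inline type as scalarized
// fields, indicated by bit 0 of rax being set. Buffers the field values in a
// newly allocated instance (TLAB fast path, runtime call on the slow path).
// Returns the code offset of the slow-path call for compiled callers, or -1.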
 6143 
 6144 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
 6145   assert(InlineTypeReturnedAsFields, "should only be used when InlineTypeReturnedAsFields is enabled");
 6146   // An inline type might be returned. If fields are in registers we
 6147   // need to allocate an inline type instance and initialize it with
 6148   // the values of the fields.
 6149   Label skip;
 6150   // We only need to allocate a new buffered inline type if one was not already returned
 6151   testptr(rax, 1);
 6152   jcc(Assembler::zero, skip);
 6153   int call_offset = -1;
 6154 
 6155 #ifdef _LP64
 6156   // The following code is similar to allocate_instance but differs slightly: the object
 6157   // size is never zero and is sometimes a compile-time constant, and storing the klass
 6158   // pointer after allocation is unnecessary when vk != nullptr. allocate_instance is not aware of these cases.
 6159   Label slow_case;
 6160   // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
 6161   mov(rscratch1, rax); // save rax for slow_case since *_allocate may corrupt it if allocation fails
 6162   if (vk != nullptr) {
 6163     // Called from C1, where the return type is statically known.
 6164     movptr(rbx, (intptr_t)vk->get_InlineKlass());
 6165     jint obj_size = vk->layout_helper();
 6166     assert(obj_size != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
 6167     if (UseTLAB) {
 6168       tlab_allocate(r15_thread, rax, noreg, obj_size, r13, r14, slow_case);
 6169     } else {
 6170       jmp(slow_case);
 6171     }
 6172   } else {
 6173     // Called from the interpreter. RAX contains ((the InlineKlass* of the return type) | 0x01)
 6174     mov(rbx, rax);
 6175     andptr(rbx, -2);
 6176     movl(r14, Address(rbx, Klass::layout_helper_offset()));
 6177     if (UseTLAB) {
 6178       tlab_allocate(r15_thread, rax, r14, 0, r13, r14, slow_case);
 6179     } else {
 6180       jmp(slow_case);
 6181     }
 6182   }
 6183   if (UseTLAB) {
 6184     // 2. Initialize buffered inline instance header
 6185     Register buffer_obj = rax;
 6186     movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), (intptr_t)markWord::inline_type_prototype().value());
 6187     xorl(r13, r13);
 6188     store_klass_gap(buffer_obj, r13);
 6189     if (vk == nullptr) {
 6190       // store_klass corrupts rbx(klass), so save it in r13 for later use (interpreter case only).
 6191       mov(r13, rbx);
 6192     }
 6193     store_klass(buffer_obj, rbx, rscratch1);
 6194     // 3. Initialize its fields with an inline class specific handler
 6195     if (vk != nullptr) {
 6196       call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
 6197     } else {
 6198       movptr(rbx, Address(r13, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 6199       movptr(rbx, Address(rbx, InlineKlass::pack_handler_offset()));
 6200       call(rbx);
 6201     }
 6202     jmp(skip);
 6203   }
 6204   bind(slow_case);
 6205   // We failed to allocate a new inline type; fall back to a runtime
 6206   // call. Some oop fields may still be live in registers, but we cannot
 6207   // tell which. The runtime call will take care of preserving them
 6208   // across a GC if one occurs.
 6209   mov(rax, rscratch1);
 6210 #endif
 6211 
 6212   if (from_interpreter) {
 6213     super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
 6214   } else {
 6215     call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
 6216     call_offset = offset();
 6217   }
 6218 
 6219   bind(skip);
 6220   return call_offset;
 6221 }
 6222 
 6223 // Move a value between registers/stack slots and update the reg_state
 6224 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
 6225   assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
 6226   if (reg_state[to->value()] == reg_written) {
 6227     return true; // Already written
 6228   }
 6229   if (from != to && bt != T_VOID) {
 6230     if (reg_state[to->value()] == reg_readonly) {
 6231       return false; // Not yet writable
 6232     }
 6233     if (from->is_reg()) {
 6234       if (to->is_reg()) {
 6235         if (from->is_XMMRegister()) {
 6236           if (bt == T_DOUBLE) {
 6237             movdbl(to->as_XMMRegister(), from->as_XMMRegister());
 6238           } else {
 6239             assert(bt == T_FLOAT, "must be float");
 6240             movflt(to->as_XMMRegister(), from->as_XMMRegister());
 6241           }
 6242         } else {
 6243           movq(to->as_Register(), from->as_Register());
 6244         }
 6245       } else {
 6246         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6247         Address to_addr = Address(rsp, st_off);
 6248         if (from->is_XMMRegister()) {
 6249           if (bt == T_DOUBLE) {
 6250             movdbl(to_addr, from->as_XMMRegister());
 6251           } else {
 6252             assert(bt == T_FLOAT, "must be float");
 6253             movflt(to_addr, from->as_XMMRegister());
 6254           }
 6255         } else {
 6256           movq(to_addr, from->as_Register());
 6257         }
 6258       }
 6259     } else {
 6260       Address from_addr = Address(rsp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize);
 6261       if (to->is_reg()) {
 6262         if (to->is_XMMRegister()) {
 6263           if (bt == T_DOUBLE) {
 6264             movdbl(to->as_XMMRegister(), from_addr);
 6265           } else {
 6266             assert(bt == T_FLOAT, "must be float");
 6267             movflt(to->as_XMMRegister(), from_addr);
 6268           }
 6269         } else {
 6270           movq(to->as_Register(), from_addr);
 6271         }
 6272       } else {
 6273         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6274         movq(r13, from_addr);
 6275         movq(Address(rsp, st_off), r13);
 6276       }
 6277     }
 6278   }
 6279   // Update register states
 6280   reg_state[from->value()] = reg_writable;
 6281   reg_state[to->value()] = reg_written;
 6282   return true;
 6283 }
 6284 
 6285 // Calculate the extra stack space required for packing or unpacking inline
 6286 // args and adjust the stack pointer
 6287 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
 6288   // Two additional slots to account for the return address
 6289   int sp_inc = (args_on_stack + 2) * VMRegImpl::stack_slot_size;
 6290   sp_inc = align_up(sp_inc, StackAlignmentInBytes);
 6291   // Save the return address, adjust the stack (make sure it is properly
 6292   // 16-byte aligned) and copy the return address to the new top of the stack.
 6293   // The stack will be repaired on return (see MacroAssembler::remove_frame).
 6294   assert(sp_inc > 0, "sanity");
 6295   pop(r13);
 6296   subptr(rsp, sp_inc);
 6297   push(r13);
 6298   return sp_inc;
 6299 }
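// Illustrative example for extend_stack_for_inline_args above (assuming
// 4-byte VMReg stack slots and 16-byte stack alignment on x86_64): for
// args_on_stack == 6, sp_inc = (6 + 2) * 4 = 32 bytes, already aligned, so
// rsp drops by 32 and the popped return address is re-pushed at the new top.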
 6300 
 6301 // Read all fields from an inline type buffer and store the field values in registers/stack slots.
 6302 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
 6303                                           VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
 6304                                           RegState reg_state[]) {
 6305   assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
 6306   assert(from->is_valid(), "source must be valid");
 6307   bool progress = false;
 6308 #ifdef ASSERT
 6309   const int start_offset = offset();
 6310 #endif
 6311 
 6312   Label L_null, L_notNull;
 6313   // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
 6314   Register tmp1 = r10;
 6315   Register tmp2 = r13;
 6316   Register fromReg = noreg;
 6317   ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, -1);
 6318   bool done = true;
 6319   bool mark_done = true;
 6320   VMReg toReg;
 6321   BasicType bt;
 6322   // Check if argument requires a null check
 6323   bool null_check = false;
 6324   VMReg nullCheckReg;
 6325   while (stream.next(nullCheckReg, bt)) {
 6326     if (sig->at(stream.sig_index())._offset == -1) {
 6327       null_check = true;
 6328       break;
 6329     }
 6330   }
 6331   stream.reset(sig_index, to_index);
 6332   while (stream.next(toReg, bt)) {
 6333     assert(toReg->is_valid(), "destination must be valid");
 6334     int idx = (int)toReg->value();
 6335     if (reg_state[idx] == reg_readonly) {
 6336       if (idx != from->value()) {
 6337         mark_done = false;
 6338       }
 6339       done = false;
 6340       continue;
 6341     } else if (reg_state[idx] == reg_written) {
 6342       continue;
 6343     }
 6344     assert(reg_state[idx] == reg_writable, "must be writable");
 6345     reg_state[idx] = reg_written;
 6346     progress = true;
 6347 
 6348     if (fromReg == noreg) {
 6349       if (from->is_reg()) {
 6350         fromReg = from->as_Register();
 6351       } else {
 6352         int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6353         movq(tmp1, Address(rsp, st_off));
 6354         fromReg = tmp1;
 6355       }
 6356       if (null_check) {
 6357         // Nullable inline type argument, emit null check
 6358         testptr(fromReg, fromReg);
 6359         jcc(Assembler::zero, L_null);
 6360       }
 6361     }
 6362     int off = sig->at(stream.sig_index())._offset;
 6363     if (off == -1) {
 6364       assert(null_check, "Missing null check");
 6365       if (toReg->is_stack()) {
 6366         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6367         movq(Address(rsp, st_off), 1);
 6368       } else {
 6369         movq(toReg->as_Register(), 1);
 6370       }
 6371       continue;
 6372     }
 6373     assert(off > 0, "offset in object should be positive");
 6374     Address fromAddr = Address(fromReg, off);
 6375     if (!toReg->is_XMMRegister()) {
 6376       Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
 6377       if (is_reference_type(bt)) {
 6378         load_heap_oop(dst, fromAddr);
 6379       } else {
 6380         bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
 6381         load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
 6382       }
 6383       if (toReg->is_stack()) {
 6384         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6385         movq(Address(rsp, st_off), dst);
 6386       }
 6387     } else if (bt == T_DOUBLE) {
 6388       movdbl(toReg->as_XMMRegister(), fromAddr);
 6389     } else {
 6390       assert(bt == T_FLOAT, "must be float");
 6391       movflt(toReg->as_XMMRegister(), fromAddr);
 6392     }
 6393   }
 6394   if (progress && null_check) {
 6395     if (done) {
 6396       jmp(L_notNull);
 6397       bind(L_null);
 6398       // Set IsInit field to zero to signal that the argument is null.
 6399       // Also set all oop fields to zero to make the GC happy.
 6400       stream.reset(sig_index, to_index);
 6401       while (stream.next(toReg, bt)) {
 6402         if (sig->at(stream.sig_index())._offset == -1 ||
 6403             bt == T_OBJECT || bt == T_ARRAY) {
 6404           if (toReg->is_stack()) {
 6405             int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6406             movq(Address(rsp, st_off), 0);
 6407           } else {
 6408             xorq(toReg->as_Register(), toReg->as_Register());
 6409           }
 6410         }
 6411       }
 6412       bind(L_notNull);
 6413     } else {
 6414       bind(L_null);
 6415     }
 6416   }
 6417 
 6418   sig_index = stream.sig_index();
 6419   to_index = stream.regs_index();
 6420 
 6421   if (mark_done && reg_state[from->value()] != reg_written) {
 6422     // This is okay because no one else will write to that slot
 6423     reg_state[from->value()] = reg_writable;
 6424   }
 6425   from_index--;
 6426   assert(progress || (start_offset == offset()), "should not emit code");
 6427   return done;
 6428 }
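// Pack the scalarized field values of an inline type argument back into a
// buffered instance loaded from 'val_array' at index 'vtarg_index'; the
// counterpart of unpack_inline_helper above.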
 6429 
 6430 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
 6431                                         VMRegPair* from, int from_count, int& from_index, VMReg to,
 6432                                         RegState reg_state[], Register val_array) {
 6433   assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter");
 6434   assert(to->is_valid(), "destination must be valid");
 6435 
 6436   if (reg_state[to->value()] == reg_written) {
 6437     skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
 6438     return true; // Already written
 6439   }
 6440 
 6441   // TODO 8284443 Isn't it an issue if the code below uses r14 as a tmp while it contains a spilled value?
 6442   // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
 6443   Register val_obj_tmp = r11;
 6444   Register from_reg_tmp = r14;
 6445   Register tmp1 = r10;
 6446   Register tmp2 = r13;
 6447   Register tmp3 = rbx;
 6448   Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();
 6449 
 6450   assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);
 6451 
 6452   if (reg_state[to->value()] == reg_readonly) {
 6453     if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
 6454       skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
 6455       return false; // Not yet writable
 6456     }
 6457     val_obj = val_obj_tmp;
 6458   }
 6459 
 6460   int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT);
 6461   load_heap_oop(val_obj, Address(val_array, index));
 6462 
 6463   ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
 6464   VMReg fromReg;
 6465   BasicType bt;
 6466   Label L_null;
 6467   while (stream.next(fromReg, bt)) {
 6468     assert(fromReg->is_valid(), "source must be valid");
 6469     reg_state[fromReg->value()] = reg_writable;
 6470 
 6471     int off = sig->at(stream.sig_index())._offset;
 6472     if (off == -1) {
 6473       // Nullable inline type argument, emit null check
 6474       Label L_notNull;
 6475       if (fromReg->is_stack()) {
 6476         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6477         testb(Address(rsp, ld_off), 1);
 6478       } else {
 6479         testb(fromReg->as_Register(), 1);
 6480       }
 6481       jcc(Assembler::notZero, L_notNull);
 6482       movptr(val_obj, 0);
 6483       jmp(L_null);
 6484       bind(L_notNull);
 6485       continue;
 6486     }
 6487 
 6488     assert(off > 0, "offset in object should be positive");
 6489     size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
 6490 
 6491     Address dst(val_obj, off);
 6492     if (!fromReg->is_XMMRegister()) {
 6493       Register src;
 6494       if (fromReg->is_stack()) {
 6495         src = from_reg_tmp;
 6496         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6497         load_sized_value(src, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
 6498       } else {
 6499         src = fromReg->as_Register();
 6500       }
 6501       assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
 6502       if (is_reference_type(bt)) {
 6503         store_heap_oop(dst, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
 6504       } else {
 6505         store_sized_value(dst, src, size_in_bytes);
 6506       }
 6507     } else if (bt == T_DOUBLE) {
 6508       movdbl(dst, fromReg->as_XMMRegister());
 6509     } else {
 6510       assert(bt == T_FLOAT, "must be float");
 6511       movflt(dst, fromReg->as_XMMRegister());
 6512     }
 6513   }
 6514   bind(L_null);
 6515   sig_index = stream.sig_index();
 6516   from_index = stream.regs_index();
 6517 
 6518   assert(reg_state[to->value()] == reg_writable, "must have already been read");
 6519   bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
 6520   assert(success, "to register must be writable");
 6521   return true;
 6522 }
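// Register used for spilling during inline type argument shuffling:
// xmm8 for floating-point values, r14 for everything else.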
 6523 
 6524 VMReg MacroAssembler::spill_reg_for(VMReg reg) {
 6525   return reg->is_XMMRegister() ? xmm8->as_VMReg() : r14->as_VMReg();
 6526 }
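// Remove the activation frame. With needs_stack_repair, the frame may have
// been extended for scalarized inline type args, so the real increment is
// loaded from the stack-repair slot just below the saved rbp and added to rsp.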
 6527 
 6528 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
 6529   assert((initial_framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
 6530   if (needs_stack_repair) {
 6531     movq(rbp, Address(rsp, initial_framesize));
 6532     // The stack increment resides just below the saved rbp
 6533     addq(rsp, Address(rsp, initial_framesize - wordSize));
 6534   } else {
 6535     if (initial_framesize > 0) {
 6536       addq(rsp, initial_framesize);
 6537     }
 6538     pop(rbp);
 6539   }
 6540 }
 6541 
 6542 // Clear a constant-sized memory region using YMM/ZMM registers.
 6543 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 6544   assert(UseAVX > 2 && VM_Version::supports_avx512vlbw(), "");
 6545   bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
 6546 
 6547   int vector64_count = (cnt & (~0x7)) >> 3;
 6548   cnt = cnt & 0x7;
 6549   const int fill64_per_loop = 4;
 6550   const int max_unrolled_fill64 = 8;
 6551 
 6552   // 64 byte initialization loop.
 6553   vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit);
 6554   int start64 = 0;
 6555   if (vector64_count > max_unrolled_fill64) {
 6556     Label LOOP;
 6557     Register index = rtmp;
 6558 
 6559     start64 = vector64_count - (vector64_count % fill64_per_loop);
 6560 
 6561     movl(index, 0);

 6611         break;
 6612       case 7:
 6613         if (use64byteVector) {
 6614           movl(rtmp, 0x7F);
 6615           kmovwl(mask, rtmp);
 6616           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 6617         } else {
 6618           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 6619           movl(rtmp, 0x7);
 6620           kmovwl(mask, rtmp);
 6621           evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
 6622         }
 6623         break;
 6624       default:
 6625         fatal("Unexpected length: %d\n", cnt);
 6626         break;
 6627     }
 6628   }
 6629 }
 6630 
 6631 void MacroAssembler::clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp,
 6632                                bool is_large, bool word_copy_only, KRegister mask) {
 6633   // cnt      - number of qwords (8-byte words).
 6634   // base     - start address, qword aligned.
 6635   // is_large - set if the optimizer knows cnt is larger than InitArrayShortSize
 6636   assert(base==rdi, "base register must be edi for rep stos");
 6637   assert(val==rax,   "val register must be eax for rep stos");
 6638   assert(cnt==rcx,   "cnt register must be ecx for rep stos");
 6639   assert(InitArrayShortSize % BytesPerLong == 0,
 6640     "InitArrayShortSize should be the multiple of BytesPerLong");
 6641 
 6642   Label DONE;



 6643 
 6644   if (!is_large) {
 6645     Label LOOP, LONG;
 6646     cmpptr(cnt, InitArrayShortSize/BytesPerLong);
 6647     jccb(Assembler::greater, LONG);
 6648 
 6649     NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
 6650 
 6651     decrement(cnt);
 6652     jccb(Assembler::negative, DONE); // Zero length
 6653 
 6654     // Use individual pointer-sized stores for small counts:
 6655     BIND(LOOP);
 6656     movptr(Address(base, cnt, Address::times_ptr), val);
 6657     decrement(cnt);
 6658     jccb(Assembler::greaterEqual, LOOP);
 6659     jmpb(DONE);
 6660 
 6661     BIND(LONG);
 6662   }
 6663 
 6664   // Use longer rep-prefixed ops for non-small counts:
 6665   if (UseFastStosb && !word_copy_only) {
 6666     shlptr(cnt, 3); // convert to number of bytes
 6667     rep_stosb();
 6668   } else if (UseXMMForObjInit) {
 6669     xmm_clear_mem(base, cnt, val, xtmp, mask);
 6670   } else {
 6671     NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
 6672     rep_stos();
 6673   }
 6674 
 6675   BIND(DONE);
 6676 }
 6677 
 6678 #endif //COMPILER2_OR_JVMCI
 6679 
 6680 
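// Fill 'count' elements of type 't' starting at 'to' with 'value'; 'aligned'
// indicates the destination is known to be suitably aligned for wide stores.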
 6681 void MacroAssembler::generate_fill(BasicType t, bool aligned,
 6682                                    Register to, Register value, Register count,
 6683                                    Register rtmp, XMMRegister xtmp) {
 6684   ShortBranchVerifier sbv(this);
 6685   assert_different_registers(to, value, count, rtmp);
 6686   Label L_exit;
 6687   Label L_fill_2_bytes, L_fill_4_bytes;
 6688 
 6689 #if defined(COMPILER2) && defined(_LP64)

10688 
10689   // Load top.
10690   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10691 
10692   // Check if the lock-stack is full.
10693   cmpl(top, LockStack::end_offset());
10694   jcc(Assembler::greaterEqual, slow);
10695 
10696   // Check for recursion.
10697   cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
10698   jcc(Assembler::equal, push);
10699 
10700   // Check header for monitor (0b10).
10701   testptr(reg_rax, markWord::monitor_value);
10702   jcc(Assembler::notZero, slow);
10703 
10704   // Try to lock. Transition lock bits 0b01 => 0b00
10705   movptr(tmp, reg_rax);
10706   andptr(tmp, ~(int32_t)markWord::unlocked_value);
10707   orptr(reg_rax, markWord::unlocked_value);
10708   if (EnableValhalla) {
10709     // Mask inline_type bit such that we go to the slow path if object is an inline type
10710     andptr(reg_rax, ~((int) markWord::inline_type_bit_in_place));
10711   }
10712   lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
10713   jcc(Assembler::notEqual, slow);
10714 
10715   // Restore top; the CAS above clobbered the register.
10716   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10717 
10718   bind(push);
10719   // After successful lock, push object on lock-stack.
10720   movptr(Address(thread, top), obj);
10721   incrementl(top, oopSize);
10722   movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
10723 }
10724 
10725 // Implements lightweight-unlocking.
10726 //
10727 // obj: the object to be unlocked
10728 // reg_rax: rax
10729 // thread: the thread
10730 // tmp: a temporary register
10731 //
< prev index next >