src/hotspot/cpu/x86/macroAssembler_x86.cpp

   10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   12  * version 2 for more details (a copy is included in the LICENSE file that
   13  * accompanied this code).
   14  *
   15  * You should have received a copy of the GNU General Public License version
   16  * 2 along with this work; if not, write to the Free Software Foundation,
   17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   18  *
   19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   20  * or visit www.oracle.com if you need additional information or have any
   21  * questions.
   22  *
   23  */
   24 
   25 #include "asm/assembler.hpp"
   26 #include "asm/assembler.inline.hpp"
   27 #include "code/compiledIC.hpp"
   28 #include "compiler/compiler_globals.hpp"
   29 #include "compiler/disassembler.hpp"

   30 #include "crc32c.h"
   31 #include "gc/shared/barrierSet.hpp"
   32 #include "gc/shared/barrierSetAssembler.hpp"
   33 #include "gc/shared/collectedHeap.inline.hpp"
   34 #include "gc/shared/tlab_globals.hpp"
   35 #include "interpreter/bytecodeHistogram.hpp"
   36 #include "interpreter/interpreter.hpp"
   37 #include "interpreter/interpreterRuntime.hpp"
   38 #include "jvm.h"
   39 #include "memory/resourceArea.hpp"
   40 #include "memory/universe.hpp"
   41 #include "oops/accessDecorators.hpp"
   42 #include "oops/compressedKlass.inline.hpp"
   43 #include "oops/compressedOops.inline.hpp"
   44 #include "oops/klass.inline.hpp"

   45 #include "prims/methodHandles.hpp"
   46 #include "runtime/continuation.hpp"
   47 #include "runtime/interfaceSupport.inline.hpp"
   48 #include "runtime/javaThread.hpp"
   49 #include "runtime/jniHandles.hpp"
   50 #include "runtime/objectMonitor.hpp"
   51 #include "runtime/os.hpp"
   52 #include "runtime/safepoint.hpp"
   53 #include "runtime/safepointMechanism.hpp"
   54 #include "runtime/sharedRuntime.hpp"

   55 #include "runtime/stubRoutines.hpp"
   56 #include "utilities/checkedCast.hpp"
   57 #include "utilities/macros.hpp"
   58 
   59 #ifdef PRODUCT
   60 #define BLOCK_COMMENT(str) /* nothing */
   61 #define STOP(error) stop(error)
   62 #else
   63 #define BLOCK_COMMENT(str) block_comment(str)
   64 #define STOP(error) block_comment(error); stop(error)
   65 #endif
   66 
   67 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
   68 
   69 #ifdef ASSERT
   70 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
   71 #endif
   72 
   73 static const Assembler::Condition reverse[] = {
   74     Assembler::noOverflow     /* overflow      = 0x0 */ ,
   75     Assembler::overflow       /* noOverflow    = 0x1 */ ,
   76     Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
   77     Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,

 1719 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1720   LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
 1721   LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
 1722   pass_arg2(this, arg_2);
 1723   pass_arg1(this, arg_1);
 1724   pass_arg0(this, arg_0);
 1725   call_VM_leaf(entry_point, 3);
 1726 }
 1727 
 1728 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
 1729   LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3));
 1730   LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
 1731   LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
 1732   pass_arg3(this, arg_3);
 1733   pass_arg2(this, arg_2);
 1734   pass_arg1(this, arg_1);
 1735   pass_arg0(this, arg_0);
 1736   call_VM_leaf(entry_point, 3);
 1737 }
 1738 
 1739 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
 1740   pass_arg0(this, arg_0);
 1741   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1742 }
 1743 
 1744 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
 1745   LP64_ONLY(assert_different_registers(arg_0, c_rarg1));
 1746   pass_arg1(this, arg_1);
 1747   pass_arg0(this, arg_0);
 1748   MacroAssembler::call_VM_leaf_base(entry_point, 2);
 1749 }
 1750 
 1751 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1752   LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
 1753   LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
 1754   pass_arg2(this, arg_2);
 1755   pass_arg1(this, arg_1);
 1756   pass_arg0(this, arg_0);
 1757   MacroAssembler::call_VM_leaf_base(entry_point, 3);
 1758 }

 2919     lea(rscratch, src);
 2920     Assembler::mulss(dst, Address(rscratch, 0));
 2921   }
 2922 }
 2923 
 2924 void MacroAssembler::null_check(Register reg, int offset) {
 2925   if (needs_explicit_null_check(offset)) {
 2926     // provoke OS null exception if reg is null by
 2927     // accessing M[reg] w/o changing any (non-CC) registers
 2928     // NOTE: cmpl is plenty here to provoke a segv
 2929     cmpptr(rax, Address(reg, 0));
 2930     // Note: should probably use testl(rax, Address(reg, 0));
 2931     //       may be shorter code (however, this version of
 2932     //       testl needs to be implemented first)
 2933   } else {
 2934     // nothing to do, (later) access of M[reg + offset]
 2935     // will provoke OS null exception if reg is null
 2936   }
 2937 }
 2938 
 2939 void MacroAssembler::os_breakpoint() {
  2940   // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
 2941   // (e.g., MSVC can't call ps() otherwise)
 2942   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 2943 }
 2944 
 2945 void MacroAssembler::unimplemented(const char* what) {
 2946   const char* buf = nullptr;
 2947   {
 2948     ResourceMark rm;
 2949     stringStream ss;
 2950     ss.print("unimplemented: %s", what);
 2951     buf = code_string(ss.as_string());
 2952   }
 2953   stop(buf);
 2954 }
 2955 
 2956 #ifdef _LP64
 2957 #define XSTATE_BV 0x200
 2958 #endif

 4106 }
 4107 
 4108 // C++ bool manipulation
 4109 void MacroAssembler::testbool(Register dst) {
 4110   if(sizeof(bool) == 1)
 4111     testb(dst, 0xff);
 4112   else if(sizeof(bool) == 2) {
 4113     // testw implementation needed for two byte bools
 4114     ShouldNotReachHere();
 4115   } else if(sizeof(bool) == 4)
 4116     testl(dst, dst);
 4117   else
 4118     // unsupported
 4119     ShouldNotReachHere();
 4120 }
 4121 
 4122 void MacroAssembler::testptr(Register dst, Register src) {
 4123   LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
 4124 }
 4125 
 4126 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
 4127 void MacroAssembler::tlab_allocate(Register thread, Register obj,
 4128                                    Register var_size_in_bytes,
 4129                                    int con_size_in_bytes,
 4130                                    Register t1,
 4131                                    Register t2,
 4132                                    Label& slow_case) {
 4133   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 4134   bs->tlab_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
 4135 }
 4136 
 4137 RegSet MacroAssembler::call_clobbered_gp_registers() {
 4138   RegSet regs;
 4139 #ifdef _LP64
 4140   regs += RegSet::of(rax, rcx, rdx);
 4141 #ifndef _WINDOWS
 4142   regs += RegSet::of(rsi, rdi);
 4143 #endif
 4144   regs += RegSet::range(r8, r11);
 4145 #else

 4364     // clear topmost word (no jump would be needed if conditional assignment worked here)
 4365     movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp);
 4366     // index could be 0 now, must check again
 4367     jcc(Assembler::zero, done);
 4368     bind(even);
 4369   }
 4370 #endif // !_LP64
 4371   // initialize remaining object fields: index is a multiple of 2 now
 4372   {
 4373     Label loop;
 4374     bind(loop);
 4375     movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
 4376     NOT_LP64(movptr(Address(address, index, Address::times_8, offset_in_bytes - 2*BytesPerWord), temp);)
 4377     decrement(index);
 4378     jcc(Assembler::notZero, loop);
 4379   }
 4380 
 4381   bind(done);
 4382 }
 4383 
 4384 // Look up the method for a megamorphic invokeinterface call.
 4385 // The target method is determined by <intf_klass, itable_index>.
 4386 // The receiver klass is in recv_klass.
 4387 // On success, the result will be in method_result, and execution falls through.
 4388 // On failure, execution transfers to the given label.
 4389 void MacroAssembler::lookup_interface_method(Register recv_klass,
 4390                                              Register intf_klass,
 4391                                              RegisterOrConstant itable_index,
 4392                                              Register method_result,
 4393                                              Register scan_temp,
 4394                                              Label& L_no_such_interface,
 4395                                              bool return_method) {
 4396   assert_different_registers(recv_klass, intf_klass, scan_temp);
 4397   assert_different_registers(method_result, intf_klass, scan_temp);
 4398   assert(recv_klass != method_result || !return_method,
 4399          "recv_klass can be destroyed when method isn't needed");
 4400 
 4401   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
 4402          "caller must use same register for non-constant itable index as for method");
 4403 

 5433   } else {
 5434     Label L;
 5435     jccb(negate_condition(cc), L);
 5436     movl(dst, src);
 5437     bind(L);
 5438   }
 5439 }
 5440 
 5441 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
 5442   if (VM_Version::supports_cmov()) {
 5443     cmovl(cc, dst, src);
 5444   } else {
 5445     Label L;
 5446     jccb(negate_condition(cc), L);
 5447     movl(dst, src);
 5448     bind(L);
 5449   }
 5450 }
 5451 
 5452 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
 5453   if (!VerifyOops) return;

 5454 
 5455   BLOCK_COMMENT("verify_oop {");
 5456 #ifdef _LP64
 5457   push(rscratch1);
 5458 #endif
 5459   push(rax);                          // save rax
 5460   push(reg);                          // pass register argument
 5461 
 5462   // Pass register number to verify_oop_subroutine
 5463   const char* b = nullptr;
 5464   {
 5465     ResourceMark rm;
 5466     stringStream ss;
 5467     ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
 5468     b = code_string(ss.as_string());
 5469   }
 5470   AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
 5471   pushptr(buffer.addr(), rscratch1);
 5472 
 5473   // call indirectly to solve generation ordering problem

 5494   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
 5495   int stackElementSize = Interpreter::stackElementSize;
 5496   int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
 5497 #ifdef ASSERT
 5498   int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
 5499   assert(offset1 - offset == stackElementSize, "correct arithmetic");
 5500 #endif
 5501   Register             scale_reg    = noreg;
 5502   Address::ScaleFactor scale_factor = Address::no_scale;
 5503   if (arg_slot.is_constant()) {
 5504     offset += arg_slot.as_constant() * stackElementSize;
 5505   } else {
 5506     scale_reg    = arg_slot.as_register();
 5507     scale_factor = Address::times(stackElementSize);
 5508   }
 5509   offset += wordSize;           // return PC is on stack
 5510   return Address(rsp, scale_reg, scale_factor, offset);
 5511 }
 5512 
 5513 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
 5514   if (!VerifyOops) return;

 5515 
 5516 #ifdef _LP64
 5517   push(rscratch1);
 5518 #endif
 5519   push(rax); // save rax,
 5520   // addr may contain rsp so we will have to adjust it based on the push
 5521   // we just did (and on 64 bit we do two pushes)
  5522   // NOTE: the 64-bit code once had a bug where it did movq(addr, rax), which
  5523   //       stored rax into addr, the reverse of what was intended.
 5524   if (addr.uses(rsp)) {
 5525     lea(rax, addr);
 5526     pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
 5527   } else {
 5528     pushptr(addr);
 5529   }
 5530 
 5531   // Pass register number to verify_oop_subroutine
 5532   const char* b = nullptr;
 5533   {
 5534     ResourceMark rm;

 5981 
 5982 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
 5983   // get mirror
 5984   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 5985   load_method_holder(mirror, method);
 5986   movptr(mirror, Address(mirror, mirror_offset));
 5987   resolve_oop_handle(mirror, tmp);
 5988 }
 5989 
 5990 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
 5991   load_method_holder(rresult, rmethod);
 5992   movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
 5993 }
 5994 
 5995 void MacroAssembler::load_method_holder(Register holder, Register method) {
 5996   movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
 5997   movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
 5998   movptr(holder, Address(holder, ConstantPool::pool_holder_offset()));          // InstanceKlass*
 5999 }
 6000 
 6001 #ifdef _LP64
 6002 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
 6003   assert(UseCompactObjectHeaders, "expect compact object headers");
 6004   movq(dst, Address(src, oopDesc::mark_offset_in_bytes()));
 6005   shrq(dst, markWord::klass_shift);
 6006 }
 6007 #endif
 6008 
 6009 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
 6010   assert_different_registers(src, tmp);
 6011   assert_different_registers(dst, tmp);
 6012 #ifdef _LP64
 6013   if (UseCompactObjectHeaders) {
 6014     load_narrow_klass_compact(dst, src);
 6015     decode_klass_not_null(dst, tmp);
 6016   } else if (UseCompressedClassPointers) {
 6017     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 6018     decode_klass_not_null(dst, tmp);
 6019   } else
 6020 #endif
 6021   {
 6022     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 6023   }
 6024 }
 6025 
 6026 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
 6027   assert(!UseCompactObjectHeaders, "not with compact headers");
 6028   assert_different_registers(src, tmp);
 6029   assert_different_registers(dst, tmp);
 6030 #ifdef _LP64
 6031   if (UseCompressedClassPointers) {
 6032     encode_klass_not_null(src, tmp);
 6033     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 6034   } else
 6035 #endif
 6036     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 6037 }
 6038 
 6039 void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
 6040 #ifdef _LP64
 6041   if (UseCompactObjectHeaders) {
 6042     assert(tmp != noreg, "need tmp");
 6043     assert_different_registers(klass, obj, tmp);
 6044     load_narrow_klass_compact(tmp, obj);
 6045     cmpl(klass, tmp);

 6078   bool as_raw = (decorators & AS_RAW) != 0;
 6079   if (as_raw) {
 6080     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
 6081   } else {
 6082     bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
 6083   }
 6084 }
 6085 
 6086 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
 6087                                      Register tmp1, Register tmp2, Register tmp3) {
 6088   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 6089   decorators = AccessInternal::decorator_fixup(decorators, type);
 6090   bool as_raw = (decorators & AS_RAW) != 0;
 6091   if (as_raw) {
 6092     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 6093   } else {
 6094     bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 6095   }
 6096 }
 6097 
 6098 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
 6099                                    Register thread_tmp, DecoratorSet decorators) {
 6100   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
 6101 }
 6102 
 6103 // Doesn't do verification, generates fixed size code
 6104 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
 6105                                             Register thread_tmp, DecoratorSet decorators) {
 6106   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
 6107 }
 6108 
 6109 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
 6110                                     Register tmp2, Register tmp3, DecoratorSet decorators) {
 6111   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
 6112 }
 6113 
 6114 // Used for storing nulls.
 6115 void MacroAssembler::store_heap_oop_null(Address dst) {
 6116   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
 6117 }

 6426 
 6427 void MacroAssembler::reinit_heapbase() {
 6428   if (UseCompressedOops) {
 6429     if (Universe::heap() != nullptr) {
 6430       if (CompressedOops::base() == nullptr) {
 6431         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
 6432       } else {
 6433         mov64(r12_heapbase, (int64_t)CompressedOops::base());
 6434       }
 6435     } else {
 6436       movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr()));
 6437     }
 6438   }
 6439 }
 6440 
 6441 #endif // _LP64
 6442 
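For reference, what the cached heap base in r12 is used for: decoding a 32-bit compressed oop back to a full address. A minimal standalone sketch with illustrative constants (the real base and shift come from CompressedOops at VM startup):

#include <cstdint>
#include <cassert>

// Illustrative constants; the real values are chosen by the VM at startup.
static const uintptr_t kHeapBase = 0;   // zero-based mode when the heap fits low
static const int       kOopShift = 3;   // log2(ObjectAlignmentInBytes)

// Decode a 32-bit compressed oop against the cached heap base (what r12 holds).
static inline uintptr_t decode_oop(uint32_t narrow) {
  return kHeapBase + (uintptr_t(narrow) << kOopShift);
}

int main() {
  assert(decode_oop(0x10) == 0x80);  // 0x10 << 3 with a zero base
  return 0;
}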
 6443 #if COMPILER2_OR_JVMCI
 6444 
 6445 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers
 6446 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 6447   // cnt - number of qwords (8-byte words).
 6448   // base - start address, qword aligned.
 6449   Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
 6450   bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
 6451   if (use64byteVector) {
 6452     vpxor(xtmp, xtmp, xtmp, AVX_512bit);
 6453   } else if (MaxVectorSize >= 32) {
 6454     vpxor(xtmp, xtmp, xtmp, AVX_256bit);
 6455   } else {
 6456     pxor(xtmp, xtmp);
 6457   }
 6458   jmp(L_zero_64_bytes);
 6459 
 6460   BIND(L_loop);
 6461   if (MaxVectorSize >= 32) {
 6462     fill64(base, 0, xtmp, use64byteVector);
 6463   } else {
 6464     movdqu(Address(base,  0), xtmp);
 6465     movdqu(Address(base, 16), xtmp);
 6466     movdqu(Address(base, 32), xtmp);
 6467     movdqu(Address(base, 48), xtmp);
 6468   }
 6469   addptr(base, 64);
 6470 
 6471   BIND(L_zero_64_bytes);
 6472   subptr(cnt, 8);
 6473   jccb(Assembler::greaterEqual, L_loop);
 6474 
 6475   // Copy trailing 64 bytes
 6476   if (use64byteVector) {
 6477     addptr(cnt, 8);
 6478     jccb(Assembler::equal, L_end);
 6479     fill64_masked(3, base, 0, xtmp, mask, cnt, rtmp, true);
 6480     jmp(L_end);
 6481   } else {
 6482     addptr(cnt, 4);
 6483     jccb(Assembler::less, L_tail);
 6484     if (MaxVectorSize >= 32) {
 6485       vmovdqu(Address(base, 0), xtmp);
 6486     } else {
 6487       movdqu(Address(base,  0), xtmp);
 6488       movdqu(Address(base, 16), xtmp);
 6489     }
 6490   }
 6491   addptr(base, 32);
 6492   subptr(cnt, 4);
 6493 
 6494   BIND(L_tail);
 6495   addptr(cnt, 4);
 6496   jccb(Assembler::lessEqual, L_end);
 6497   if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
 6498     fill32_masked(3, base, 0, xtmp, mask, cnt, rtmp);
 6499   } else {
 6500     decrement(cnt);
 6501 
 6502     BIND(L_sloop);
 6503     movq(Address(base, 0), xtmp);
 6504     addptr(base, 8);
 6505     decrement(cnt);
 6506     jccb(Assembler::greaterEqual, L_sloop);
 6507   }
 6508   BIND(L_end);
 6509 }
 6510 
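The shape of xmm_clear_mem above (a 64-byte vector loop plus a scalar qword tail) can be modeled in portable C++ with AVX2 intrinsics. A minimal sketch, not the VM's code; compile with -mavx2:

#include <immintrin.h>
#include <cstdint>
#include <cstddef>

// Zero 'qwords' 8-byte words starting at 'base': a 64-byte YMM loop
// followed by an 8-byte scalar tail.
static void clear_qwords_avx2(uint64_t* base, size_t qwords) {
  __m256i z = _mm256_setzero_si256();
  while (qwords >= 8) {                        // 8 qwords == 64 bytes per pass
    _mm256_storeu_si256((__m256i*)base,     z);
    _mm256_storeu_si256((__m256i*)base + 1, z);
    base   += 8;
    qwords -= 8;
  }
  while (qwords > 0) {                         // trailing 0..7 qwords
    *base++ = 0;
    --qwords;
  }
}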
 6511 // Clearing constant sized memory using YMM/ZMM registers.
 6512 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 6513   assert(UseAVX > 2 && VM_Version::supports_avx512vl(), "");
 6514   bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
 6515 
 6516   int vector64_count = (cnt & (~0x7)) >> 3;
 6517   cnt = cnt & 0x7;
 6518   const int fill64_per_loop = 4;
 6519   const int max_unrolled_fill64 = 8;
 6520 
 6521   // 64 byte initialization loop.
 6522   vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit);
 6523   int start64 = 0;
 6524   if (vector64_count > max_unrolled_fill64) {
 6525     Label LOOP;
 6526     Register index = rtmp;
 6527 
 6528     start64 = vector64_count - (vector64_count % fill64_per_loop);
 6529 
 6530     movl(index, 0);

 6580         break;
 6581       case 7:
 6582         if (use64byteVector) {
 6583           movl(rtmp, 0x7F);
 6584           kmovwl(mask, rtmp);
 6585           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 6586         } else {
 6587           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 6588           movl(rtmp, 0x7);
 6589           kmovwl(mask, rtmp);
 6590           evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
 6591         }
 6592         break;
 6593       default:
 6594         fatal("Unexpected length : %d\n",cnt);
 6595         break;
 6596     }
 6597   }
 6598 }
 6599 
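The kmovwl/evmovdqu pattern above handles a sub-vector tail with one masked store instead of a scalar loop. A minimal AVX-512 sketch of the same idea (compile with -mavx512f; not the VM's code):

#include <immintrin.h>
#include <cstdint>

// Zero a 0..7-qword tail with a single masked 512-bit store.
static void clear_tail_avx512(uint64_t* dst, unsigned remaining_qwords) {
  __mmask8 m = (__mmask8)((1u << remaining_qwords) - 1);  // e.g. 7 -> 0x7F
  _mm512_mask_storeu_epi64(dst, m, _mm512_setzero_si512());
}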
 6600 void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp, XMMRegister xtmp,
 6601                                bool is_large, KRegister mask) {
 6602   // cnt      - number of qwords (8-byte words).
 6603   // base     - start address, qword aligned.
 6604   // is_large - if optimizers know cnt is larger than InitArrayShortSize
 6605   assert(base==rdi, "base register must be edi for rep stos");
 6606   assert(tmp==rax,   "tmp register must be eax for rep stos");
 6607   assert(cnt==rcx,   "cnt register must be ecx for rep stos");
 6608   assert(InitArrayShortSize % BytesPerLong == 0,
  6609     "InitArrayShortSize should be a multiple of BytesPerLong");
 6610 
 6611   Label DONE;
 6612   if (!is_large || !UseXMMForObjInit) {
 6613     xorptr(tmp, tmp);
 6614   }
 6615 
 6616   if (!is_large) {
 6617     Label LOOP, LONG;
 6618     cmpptr(cnt, InitArrayShortSize/BytesPerLong);
 6619     jccb(Assembler::greater, LONG);
 6620 
 6621     NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
 6622 
 6623     decrement(cnt);
 6624     jccb(Assembler::negative, DONE); // Zero length
 6625 
 6626     // Use individual pointer-sized stores for small counts:
 6627     BIND(LOOP);
 6628     movptr(Address(base, cnt, Address::times_ptr), tmp);
 6629     decrement(cnt);
 6630     jccb(Assembler::greaterEqual, LOOP);
 6631     jmpb(DONE);
 6632 
 6633     BIND(LONG);
 6634   }
 6635 
 6636   // Use longer rep-prefixed ops for non-small counts:
 6637   if (UseFastStosb) {
 6638     shlptr(cnt, 3); // convert to number of bytes
 6639     rep_stosb();
 6640   } else if (UseXMMForObjInit) {
 6641     xmm_clear_mem(base, cnt, tmp, xtmp, mask);
 6642   } else {
 6643     NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
 6644     rep_stos();
 6645   }
 6646 
 6647   BIND(DONE);
 6648 }
 6649 
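What the UseFastStosb branch emits is the x86 rep stosb idiom: write AL to [RDI], RCX times, advancing RDI. A sketch as GCC/Clang x86-64 inline assembly (illustrative, not the VM's code):

#include <cstddef>

static void zero_bytes_rep_stosb(void* dst, size_t nbytes) {
  asm volatile("rep stosb"
               : "+D"(dst), "+c"(nbytes)   // RDI = dest, RCX = count (updated)
               : "a"(0)                    // AL = byte value to store
               : "memory");
}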
 6650 #endif //COMPILER2_OR_JVMCI
 6651 
 6652 
 6653 void MacroAssembler::generate_fill(BasicType t, bool aligned,
 6654                                    Register to, Register value, Register count,
 6655                                    Register rtmp, XMMRegister xtmp) {
 6656   ShortBranchVerifier sbv(this);
 6657   assert_different_registers(to, value, count, rtmp);
 6658   Label L_exit;
 6659   Label L_fill_2_bytes, L_fill_4_bytes;
 6660 
 6661 #if defined(COMPILER2) && defined(_LP64)

10742 
10743   // Load top.
10744   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10745 
10746   // Check if the lock-stack is full.
10747   cmpl(top, LockStack::end_offset());
10748   jcc(Assembler::greaterEqual, slow);
10749 
10750   // Check for recursion.
10751   cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
10752   jcc(Assembler::equal, push);
10753 
10754   // Check header for monitor (0b10).
10755   testptr(reg_rax, markWord::monitor_value);
10756   jcc(Assembler::notZero, slow);
10757 
10758   // Try to lock. Transition lock bits 0b01 => 0b00
10759   movptr(tmp, reg_rax);
10760   andptr(tmp, ~(int32_t)markWord::unlocked_value);
10761   orptr(reg_rax, markWord::unlocked_value);
10762   lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
10763   jcc(Assembler::notEqual, slow);
10764 
10765   // Restore top, CAS clobbers register.
10766   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10767 
10768   bind(push);
10769   // After successful lock, push object on lock-stack.
10770   movptr(Address(thread, top), obj);
10771   incrementl(top, oopSize);
10772   movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
10773 }
10774 
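The CAS above transitions the low lock bits of the mark word from 0b01 (unlocked) to 0b00 (locked). A bit-level model in plain C++ (a sketch; the real markWord carries more fields than the lock bits):

#include <atomic>
#include <cstdint>

static const uintptr_t kUnlockedValue = 0b01;  // low lock bits: 0b01 unlocked, 0b00 locked

static bool try_lightweight_lock(std::atomic<uintptr_t>& mark_word) {
  uintptr_t mark     = mark_word.load(std::memory_order_relaxed);
  uintptr_t expected = mark |  kUnlockedValue;  // must currently be unlocked
  uintptr_t locked   = mark & ~kUnlockedValue;  // clear the unlocked bit
  // On failure the caller takes the slow path, as with jcc(notEqual, slow).
  return mark_word.compare_exchange_strong(expected, locked,
                                           std::memory_order_acquire);
}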
10775 // Implements lightweight-unlocking.
10776 //
10777 // obj: the object to be unlocked
10778 // reg_rax: rax
10779 // thread: the thread
10780 // tmp: a temporary register
10781 void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {

   10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   12  * version 2 for more details (a copy is included in the LICENSE file that
   13  * accompanied this code).
   14  *
   15  * You should have received a copy of the GNU General Public License version
   16  * 2 along with this work; if not, write to the Free Software Foundation,
   17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   18  *
   19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   20  * or visit www.oracle.com if you need additional information or have any
   21  * questions.
   22  *
   23  */
   24 
   25 #include "asm/assembler.hpp"
   26 #include "asm/assembler.inline.hpp"
   27 #include "code/compiledIC.hpp"
   28 #include "compiler/compiler_globals.hpp"
   29 #include "compiler/disassembler.hpp"
   30 #include "ci/ciInlineKlass.hpp"
   31 #include "crc32c.h"
   32 #include "gc/shared/barrierSet.hpp"
   33 #include "gc/shared/barrierSetAssembler.hpp"
   34 #include "gc/shared/collectedHeap.inline.hpp"
   35 #include "gc/shared/tlab_globals.hpp"
   36 #include "interpreter/bytecodeHistogram.hpp"
   37 #include "interpreter/interpreter.hpp"
   38 #include "interpreter/interpreterRuntime.hpp"
   39 #include "jvm.h"
   40 #include "memory/resourceArea.hpp"
   41 #include "memory/universe.hpp"
   42 #include "oops/accessDecorators.hpp"
   43 #include "oops/compressedKlass.inline.hpp"
   44 #include "oops/compressedOops.inline.hpp"
   45 #include "oops/klass.inline.hpp"
   46 #include "oops/resolvedFieldEntry.hpp"
   47 #include "prims/methodHandles.hpp"
   48 #include "runtime/continuation.hpp"
   49 #include "runtime/interfaceSupport.inline.hpp"
   50 #include "runtime/javaThread.hpp"
   51 #include "runtime/jniHandles.hpp"
   52 #include "runtime/objectMonitor.hpp"
   53 #include "runtime/os.hpp"
   54 #include "runtime/safepoint.hpp"
   55 #include "runtime/safepointMechanism.hpp"
   56 #include "runtime/sharedRuntime.hpp"
   57 #include "runtime/signature_cc.hpp"
   58 #include "runtime/stubRoutines.hpp"
   59 #include "utilities/checkedCast.hpp"
   60 #include "utilities/macros.hpp"
   61 #include "vmreg_x86.inline.hpp"
   62 #ifdef COMPILER2
   63 #include "opto/output.hpp"
   64 #endif
   65 
   66 #ifdef PRODUCT
   67 #define BLOCK_COMMENT(str) /* nothing */
   68 #define STOP(error) stop(error)
   69 #else
   70 #define BLOCK_COMMENT(str) block_comment(str)
   71 #define STOP(error) block_comment(error); stop(error)
   72 #endif
   73 
   74 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
   75 
   76 #ifdef ASSERT
   77 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
   78 #endif
   79 
   80 static const Assembler::Condition reverse[] = {
   81     Assembler::noOverflow     /* overflow      = 0x0 */ ,
   82     Assembler::overflow       /* noOverflow    = 0x1 */ ,
   83     Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
   84     Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,

 1726 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1727   LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
 1728   LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
 1729   pass_arg2(this, arg_2);
 1730   pass_arg1(this, arg_1);
 1731   pass_arg0(this, arg_0);
 1732   call_VM_leaf(entry_point, 3);
 1733 }
 1734 
 1735 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
 1736   LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3));
 1737   LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
 1738   LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
 1739   pass_arg3(this, arg_3);
 1740   pass_arg2(this, arg_2);
 1741   pass_arg1(this, arg_1);
 1742   pass_arg0(this, arg_0);
 1743   call_VM_leaf(entry_point, 3);
 1744 }
 1745 
 1746 void MacroAssembler::super_call_VM_leaf(address entry_point) {
 1747   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1748 }
 1749 
 1750 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
 1751   pass_arg0(this, arg_0);
 1752   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1753 }
 1754 
 1755 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
 1756   LP64_ONLY(assert_different_registers(arg_0, c_rarg1));
 1757   pass_arg1(this, arg_1);
 1758   pass_arg0(this, arg_0);
 1759   MacroAssembler::call_VM_leaf_base(entry_point, 2);
 1760 }
 1761 
 1762 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1763   LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
 1764   LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
 1765   pass_arg2(this, arg_2);
 1766   pass_arg1(this, arg_1);
 1767   pass_arg0(this, arg_0);
 1768   MacroAssembler::call_VM_leaf_base(entry_point, 3);
 1769 }

 2930     lea(rscratch, src);
 2931     Assembler::mulss(dst, Address(rscratch, 0));
 2932   }
 2933 }
 2934 
 2935 void MacroAssembler::null_check(Register reg, int offset) {
 2936   if (needs_explicit_null_check(offset)) {
 2937     // provoke OS null exception if reg is null by
 2938     // accessing M[reg] w/o changing any (non-CC) registers
 2939     // NOTE: cmpl is plenty here to provoke a segv
 2940     cmpptr(rax, Address(reg, 0));
 2941     // Note: should probably use testl(rax, Address(reg, 0));
 2942     //       may be shorter code (however, this version of
 2943     //       testl needs to be implemented first)
 2944   } else {
 2945     // nothing to do, (later) access of M[reg + offset]
 2946     // will provoke OS null exception if reg is null
 2947   }
 2948 }
 2949 
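Why needs_explicit_null_check depends on the offset: a load at a small offset from a null base lands inside the OS-protected page at address zero and faults, while a large offset can reach past the protected region, so an explicit check is required. A minimal sketch of that decision (the page size here is illustrative):

#include <cstddef>

static bool needs_explicit_null_check_sketch(ptrdiff_t offset,
                                             size_t protected_bytes = 4096) {
  return offset < 0 || (size_t)offset >= protected_bytes;
}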
 2950 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
 2951   andptr(markword, markWord::inline_type_mask_in_place);
 2952   cmpptr(markword, markWord::inline_type_pattern);
 2953   jcc(Assembler::equal, is_inline_type);
 2954 }
 2955 
 2956 void MacroAssembler::test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type) {
 2957   load_unsigned_short(temp_reg, Address(klass, Klass::access_flags_offset()));
 2958   testl(temp_reg, JVM_ACC_IDENTITY);
 2959   jcc(Assembler::zero, is_inline_type);
 2960 }
 2961 
 2962 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type) {
 2963   testptr(object, object);
 2964   jcc(Assembler::zero, not_inline_type);
 2965   const int is_inline_type_mask = markWord::inline_type_pattern;
 2966   movptr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
 2967   andptr(tmp, is_inline_type_mask);
 2968   cmpptr(tmp, is_inline_type_mask);
 2969   jcc(Assembler::notEqual, not_inline_type);
 2970 }
 2971 
 2972 void MacroAssembler::test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type) {
 2973 #ifdef ASSERT
 2974   {
 2975     Label done_check;
 2976     test_klass_is_inline_type(klass, temp_reg, done_check);
 2977     stop("test_klass_is_empty_inline_type with non inline type klass");
 2978     bind(done_check);
 2979   }
 2980 #endif
 2981   movl(temp_reg, Address(klass, InstanceKlass::misc_flags_offset()));
 2982   testl(temp_reg, InstanceKlassFlags::is_empty_inline_type_value());
 2983   jcc(Assembler::notZero, is_empty_inline_type);
 2984 }
 2985 
 2986 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) {
 2987   movl(temp_reg, flags);
 2988   testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
 2989   jcc(Assembler::notEqual, is_null_free_inline_type);
 2990 }
 2991 
 2992 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) {
 2993   movl(temp_reg, flags);
 2994   testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
 2995   jcc(Assembler::equal, not_null_free_inline_type);
 2996 }
 2997 
 2998 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) {
 2999   movl(temp_reg, flags);
 3000   testl(temp_reg, 1 << ResolvedFieldEntry::is_flat_shift);
 3001   jcc(Assembler::notEqual, is_flat);
 3002 }
 3003 
 3004 void MacroAssembler::test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker) {
 3005   movl(temp_reg, flags);
 3006   testl(temp_reg, 1 << ResolvedFieldEntry::has_null_marker_shift);
 3007   jcc(Assembler::notEqual, has_null_marker);
 3008 }
 3009 
 3010 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
 3011   Label test_mark_word;
 3012   // load mark word
 3013   movptr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
  3013   movptr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
  3014   // check whether the mark word is displaced; if unlocked, it holds the prototype bits
  3015   testl(temp_reg, markWord::unlocked_value);
  3016   jccb(Assembler::notZero, test_mark_word);
  3017   // slow path: load the prototype header from the klass
 3018   push(rscratch1);
 3019   load_prototype_header(temp_reg, oop, rscratch1);
 3020   pop(rscratch1);
 3021 
 3022   bind(test_mark_word);
 3023   testl(temp_reg, test_bit);
 3024   jcc((jmp_set) ? Assembler::notZero : Assembler::zero, jmp_label);
 3025 }
 3026 
 3027 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg,
 3028                                          Label& is_flat_array) {
 3029 #ifdef _LP64
 3030   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array);
 3031 #else
 3032   load_klass(temp_reg, oop, noreg);
 3033   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 3034   test_flat_array_layout(temp_reg, is_flat_array);
 3035 #endif
 3036 }
 3037 
 3038 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg,
 3039                                              Label& is_non_flat_array) {
 3040 #ifdef _LP64
 3041   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array);
 3042 #else
 3043   load_klass(temp_reg, oop, noreg);
 3044   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 3045   test_non_flat_array_layout(temp_reg, is_non_flat_array);
 3046 #endif
 3047 }
 3048 
 3049 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label&is_null_free_array) {
 3050 #ifdef _LP64
 3051   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
 3052 #else
 3053   Unimplemented();
 3054 #endif
 3055 }
 3056 
 3057 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label&is_non_null_free_array) {
 3058 #ifdef _LP64
 3059   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
 3060 #else
 3061   Unimplemented();
 3062 #endif
 3063 }
 3064 
 3065 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) {
 3066   testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
 3067   jcc(Assembler::notZero, is_flat_array);
 3068 }
 3069 
 3070 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) {
 3071   testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
 3072   jcc(Assembler::zero, is_non_flat_array);
 3073 }
 3074 
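The test_* helpers above all follow one shape: mask the mark word (or klass flags), then compare against a pattern. An illustrative C++ model of test_markword_is_inline_type; the mask and pattern values here are placeholders, not the real markWord constants:

#include <cstdint>

static const uintptr_t kInlineTypeMask    = 0b111;  // placeholder mask
static const uintptr_t kInlineTypePattern = 0b101;  // placeholder bit pattern

static bool mark_is_inline_type(uintptr_t mark) {
  // same shape as test_markword_is_inline_type: mask, then compare
  return (mark & kInlineTypeMask) == kInlineTypePattern;
}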
 3075 void MacroAssembler::os_breakpoint() {
  3076   // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
 3077   // (e.g., MSVC can't call ps() otherwise)
 3078   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 3079 }
 3080 
 3081 void MacroAssembler::unimplemented(const char* what) {
 3082   const char* buf = nullptr;
 3083   {
 3084     ResourceMark rm;
 3085     stringStream ss;
 3086     ss.print("unimplemented: %s", what);
 3087     buf = code_string(ss.as_string());
 3088   }
 3089   stop(buf);
 3090 }
 3091 
 3092 #ifdef _LP64
 3093 #define XSTATE_BV 0x200
 3094 #endif

 4242 }
 4243 
 4244 // C++ bool manipulation
 4245 void MacroAssembler::testbool(Register dst) {
 4246   if(sizeof(bool) == 1)
 4247     testb(dst, 0xff);
 4248   else if(sizeof(bool) == 2) {
 4249     // testw implementation needed for two byte bools
 4250     ShouldNotReachHere();
 4251   } else if(sizeof(bool) == 4)
 4252     testl(dst, dst);
 4253   else
 4254     // unsupported
 4255     ShouldNotReachHere();
 4256 }
 4257 
 4258 void MacroAssembler::testptr(Register dst, Register src) {
 4259   LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
 4260 }
 4261 
 4262 // Object / value buffer allocation...
 4263 //
 4264 // Kills klass and rsi on LP64
 4265 void MacroAssembler::allocate_instance(Register klass, Register new_obj,
 4266                                        Register t1, Register t2,
 4267                                        bool clear_fields, Label& alloc_failed)
 4268 {
 4269   Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop;
 4270   Register layout_size = t1;
 4271   assert(new_obj == rax, "needs to be rax");
 4272   assert_different_registers(klass, new_obj, t1, t2);
 4273 
 4274   // get instance_size in InstanceKlass (scaled to a count of bytes)
 4275   movl(layout_size, Address(klass, Klass::layout_helper_offset()));
 4276   // test to see if it is malformed in some way
 4277   testl(layout_size, Klass::_lh_instance_slow_path_bit);
 4278   jcc(Assembler::notZero, slow_case_no_pop);
 4279 
 4280   // Allocate the instance:
 4281   //  If TLAB is enabled:
 4282   //    Try to allocate in the TLAB.
 4283   //    If fails, go to the slow path.
 4284   //  Else If inline contiguous allocations are enabled:
 4285   //    Try to allocate in eden.
 4286   //    If fails due to heap end, go to slow path.
 4287   //
 4288   //  If TLAB is enabled OR inline contiguous is enabled:
 4289   //    Initialize the allocation.
 4290   //    Exit.
 4291   //
 4292   //  Go to slow path.
 4293 
 4294   push(klass);
 4295   const Register thread = LP64_ONLY(r15_thread) NOT_LP64(klass);
 4296 #ifndef _LP64
 4297   if (UseTLAB) {
 4298     get_thread(thread);
 4299   }
 4300 #endif // _LP64
 4301 
 4302   if (UseTLAB) {
 4303     tlab_allocate(thread, new_obj, layout_size, 0, klass, t2, slow_case);
 4304     if (ZeroTLAB || (!clear_fields)) {
 4305       // the fields have been already cleared
 4306       jmp(initialize_header);
 4307     } else {
 4308       // initialize both the header and fields
 4309       jmp(initialize_object);
 4310     }
 4311   } else {
 4312     jmp(slow_case);
 4313   }
 4314 
  4315   // If UseTLAB is true, the object was allocated above and still needs to be initialized.
 4316   // Otherwise, skip and go to the slow path.
 4317   if (UseTLAB) {
 4318     if (clear_fields) {
 4319       // The object is initialized before the header.  If the object size is
 4320       // zero, go directly to the header initialization.
 4321       bind(initialize_object);
 4322       if (UseCompactObjectHeaders) {
 4323         assert(is_aligned(oopDesc::base_offset_in_bytes(), BytesPerLong), "oop base offset must be 8-byte-aligned");
 4324         decrement(layout_size, oopDesc::base_offset_in_bytes());
 4325       } else {
 4326         decrement(layout_size, sizeof(oopDesc));
 4327       }
 4328       jcc(Assembler::zero, initialize_header);
 4329 
 4330       // Initialize topmost object field, divide size by 8, check if odd and
 4331       // test if zero.
 4332       Register zero = klass;
 4333       xorl(zero, zero);    // use zero reg to clear memory (shorter code)
 4334       shrl(layout_size, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
 4335 
 4336   #ifdef ASSERT
 4337       // make sure instance_size was multiple of 8
 4338       Label L;
 4339       // Ignore partial flag stall after shrl() since it is debug VM
 4340       jcc(Assembler::carryClear, L);
 4341       stop("object size is not multiple of 2 - adjust this code");
 4342       bind(L);
 4343       // must be > 0, no extra check needed here
 4344   #endif
 4345 
 4346       // initialize remaining object fields: instance_size was a multiple of 8
 4347       {
 4348         Label loop;
 4349         bind(loop);
 4350         int header_size_bytes = oopDesc::header_size() * HeapWordSize;
 4351         assert(is_aligned(header_size_bytes, BytesPerLong), "oop header size must be 8-byte-aligned");
 4352         movptr(Address(new_obj, layout_size, Address::times_8, header_size_bytes - 1*oopSize), zero);
 4353         NOT_LP64(movptr(Address(new_obj, layout_size, Address::times_8, header_size_bytes - 2*oopSize), zero));
 4354         decrement(layout_size);
 4355         jcc(Assembler::notZero, loop);
 4356       }
 4357     } // clear_fields
 4358 
 4359     // initialize object header only.
 4360     bind(initialize_header);
 4361     if (UseCompactObjectHeaders || EnableValhalla) {
 4362       pop(klass);
 4363       Register mark_word = t2;
 4364       movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
 4365       movptr(Address(new_obj, oopDesc::mark_offset_in_bytes ()), mark_word);
 4366     } else {
 4367      movptr(Address(new_obj, oopDesc::mark_offset_in_bytes()),
 4368             (intptr_t)markWord::prototype().value()); // header
 4369      pop(klass);   // get saved klass back in the register.
 4370     }
 4371     if (!UseCompactObjectHeaders) {
 4372 #ifdef _LP64
 4373       xorl(rsi, rsi);                 // use zero reg to clear memory (shorter code)
 4374       store_klass_gap(new_obj, rsi);  // zero klass gap for compressed oops
 4375 #endif
 4376       movptr(t2, klass);         // preserve klass
 4377       store_klass(new_obj, t2, rscratch1);  // src klass reg is potentially compressed
 4378     }
 4379     jmp(done);
 4380   }
 4381 
 4382   bind(slow_case);
 4383   pop(klass);
 4384   bind(slow_case_no_pop);
 4385   jmp(alloc_failed);
 4386 
 4387   bind(done);
 4388 }
 4389 
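The TLAB fast path that tlab_allocate (below) delegates to is a simple bump-pointer allocation: advance the thread-local top pointer, or fail over to the slow case. A minimal C++ model of that fast path (a sketch, not the VM's code):

#include <cstddef>

struct Tlab {
  char* top;
  char* end;
};

static void* tlab_try_allocate(Tlab& tlab, size_t size_in_bytes) {
  if (tlab.top + size_in_bytes <= tlab.end) {
    void* obj = tlab.top;
    tlab.top += size_in_bytes;   // bump allocation, no synchronization needed
    return obj;
  }
  return nullptr;                // caller jumps to slow_case
}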
 4390 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
 4391 void MacroAssembler::tlab_allocate(Register thread, Register obj,
 4392                                    Register var_size_in_bytes,
 4393                                    int con_size_in_bytes,
 4394                                    Register t1,
 4395                                    Register t2,
 4396                                    Label& slow_case) {
 4397   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 4398   bs->tlab_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
 4399 }
 4400 
 4401 RegSet MacroAssembler::call_clobbered_gp_registers() {
 4402   RegSet regs;
 4403 #ifdef _LP64
 4404   regs += RegSet::of(rax, rcx, rdx);
 4405 #ifndef _WINDOWS
 4406   regs += RegSet::of(rsi, rdi);
 4407 #endif
 4408   regs += RegSet::range(r8, r11);
 4409 #else

 4628     // clear topmost word (no jump would be needed if conditional assignment worked here)
 4629     movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp);
 4630     // index could be 0 now, must check again
 4631     jcc(Assembler::zero, done);
 4632     bind(even);
 4633   }
 4634 #endif // !_LP64
 4635   // initialize remaining object fields: index is a multiple of 2 now
 4636   {
 4637     Label loop;
 4638     bind(loop);
 4639     movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
 4640     NOT_LP64(movptr(Address(address, index, Address::times_8, offset_in_bytes - 2*BytesPerWord), temp);)
 4641     decrement(index);
 4642     jcc(Assembler::notZero, loop);
 4643   }
 4644 
 4645   bind(done);
 4646 }
 4647 
 4648 void MacroAssembler::get_inline_type_field_klass(Register holder_klass, Register index, Register inline_klass) {
 4649   inline_layout_info(holder_klass, index, inline_klass);
 4650   movptr(inline_klass, Address(inline_klass, InlineLayoutInfo::klass_offset()));
 4651 }
 4652 
 4653 void MacroAssembler::inline_layout_info(Register holder_klass, Register index, Register layout_info) {
 4654   movptr(layout_info, Address(holder_klass, InstanceKlass::inline_layout_info_array_offset()));
 4655 #ifdef ASSERT
 4656   {
 4657     Label done;
 4658     cmpptr(layout_info, 0);
 4659     jcc(Assembler::notEqual, done);
 4660     stop("inline_layout_info_array is null");
 4661     bind(done);
 4662   }
 4663 #endif
 4664 
 4665   InlineLayoutInfo array[2];
 4666   int size = (char*)&array[1] - (char*)&array[0]; // computing size of array elements
 4667   if (is_power_of_2(size)) {
 4668     shll(index, log2i_exact(size)); // Scale index by power of 2
 4669   } else {
 4670     imull(index, index, size); // Scale the index to be the entry index * array_element_size
 4671   }
 4672   lea(layout_info, Address(layout_info, index, Address::times_1, Array<InlineLayoutInfo>::base_offset_in_bytes()));
 4673 }
 4674 
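The &array[1] - &array[0] trick above computes the element stride (including padding), and the index scaling then uses a shift when that stride is a power of two, a multiply otherwise. A small standalone demonstration with an illustrative payload type:

#include <cstdio>

struct Entry { void* klass; short flags; };  // illustrative payload

int main() {
  Entry array[2];
  long size = (char*)&array[1] - (char*)&array[0];   // same as sizeof(Entry)
  printf("stride = %ld (sizeof = %zu)\n", size, sizeof(Entry));
  int index = 3;
  long byte_offset = index * size;   // imull path; a shift if size is a power of two
  printf("entry %d at byte offset %ld\n", index, byte_offset);
  return 0;
}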
 4675 void MacroAssembler::get_default_value_oop(Register inline_klass, Register temp_reg, Register obj) {
 4676 #ifdef ASSERT
 4677   {
 4678     Label done_check;
 4679     test_klass_is_inline_type(inline_klass, temp_reg, done_check);
 4680     stop("get_default_value_oop from non inline type klass");
 4681     bind(done_check);
 4682   }
 4683 #endif
 4684   Register offset = temp_reg;
 4685   // Getting the offset of the pre-allocated default value
 4686   movptr(offset, Address(inline_klass, in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset())));
 4687   movl(offset, Address(offset, in_bytes(InlineKlass::default_value_offset_offset())));
 4688 
 4689   // Getting the mirror
 4690   movptr(obj, Address(inline_klass, in_bytes(Klass::java_mirror_offset())));
 4691   resolve_oop_handle(obj, inline_klass);
 4692 
 4693   // Getting the pre-allocated default value from the mirror
 4694   Address field(obj, offset, Address::times_1);
 4695   load_heap_oop(obj, field);
 4696 }
 4697 
 4698 void MacroAssembler::get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj) {
 4699 #ifdef ASSERT
 4700   {
 4701     Label done_check;
 4702     test_klass_is_empty_inline_type(inline_klass, temp_reg, done_check);
 4703     stop("get_empty_value from non-empty inline klass");
 4704     bind(done_check);
 4705   }
 4706 #endif
 4707   get_default_value_oop(inline_klass, temp_reg, obj);
 4708 }
 4709 
 4710 
 4711 // Look up the method for a megamorphic invokeinterface call.
 4712 // The target method is determined by <intf_klass, itable_index>.
 4713 // The receiver klass is in recv_klass.
 4714 // On success, the result will be in method_result, and execution falls through.
 4715 // On failure, execution transfers to the given label.
 4716 void MacroAssembler::lookup_interface_method(Register recv_klass,
 4717                                              Register intf_klass,
 4718                                              RegisterOrConstant itable_index,
 4719                                              Register method_result,
 4720                                              Register scan_temp,
 4721                                              Label& L_no_such_interface,
 4722                                              bool return_method) {
 4723   assert_different_registers(recv_klass, intf_klass, scan_temp);
 4724   assert_different_registers(method_result, intf_klass, scan_temp);
 4725   assert(recv_klass != method_result || !return_method,
 4726          "recv_klass can be destroyed when method isn't needed");
 4727 
 4728   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
 4729          "caller must use same register for non-constant itable index as for method");
 4730 

 5760   } else {
 5761     Label L;
 5762     jccb(negate_condition(cc), L);
 5763     movl(dst, src);
 5764     bind(L);
 5765   }
 5766 }
 5767 
 5768 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
 5769   if (VM_Version::supports_cmov()) {
 5770     cmovl(cc, dst, src);
 5771   } else {
 5772     Label L;
 5773     jccb(negate_condition(cc), L);
 5774     movl(dst, src);
 5775     bind(L);
 5776   }
 5777 }
 5778 
 5779 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
 5780   if (!VerifyOops || VerifyAdapterSharing) {
  5781     // The address of the code string pushed below confuses VerifyAdapterSharing
 5782     // because it may differ between otherwise equivalent adapters.
 5783     return;
 5784   }
 5785 
 5786   BLOCK_COMMENT("verify_oop {");
 5787 #ifdef _LP64
 5788   push(rscratch1);
 5789 #endif
 5790   push(rax);                          // save rax
 5791   push(reg);                          // pass register argument
 5792 
 5793   // Pass register number to verify_oop_subroutine
 5794   const char* b = nullptr;
 5795   {
 5796     ResourceMark rm;
 5797     stringStream ss;
 5798     ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
 5799     b = code_string(ss.as_string());
 5800   }
 5801   AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
 5802   pushptr(buffer.addr(), rscratch1);
 5803 
 5804   // call indirectly to solve generation ordering problem

 5825   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
 5826   int stackElementSize = Interpreter::stackElementSize;
 5827   int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
 5828 #ifdef ASSERT
 5829   int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
 5830   assert(offset1 - offset == stackElementSize, "correct arithmetic");
 5831 #endif
 5832   Register             scale_reg    = noreg;
 5833   Address::ScaleFactor scale_factor = Address::no_scale;
 5834   if (arg_slot.is_constant()) {
 5835     offset += arg_slot.as_constant() * stackElementSize;
 5836   } else {
 5837     scale_reg    = arg_slot.as_register();
 5838     scale_factor = Address::times(stackElementSize);
 5839   }
 5840   offset += wordSize;           // return PC is on stack
 5841   return Address(rsp, scale_reg, scale_factor, offset);
 5842 }
 5843 
 5844 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
 5845   if (!VerifyOops || VerifyAdapterSharing) {
  5846     // The address of the code string pushed below confuses VerifyAdapterSharing
 5847     // because it may differ between otherwise equivalent adapters.
 5848     return;
 5849   }
 5850 
 5851 #ifdef _LP64
 5852   push(rscratch1);
 5853 #endif
 5854   push(rax); // save rax,
 5855   // addr may contain rsp so we will have to adjust it based on the push
 5856   // we just did (and on 64 bit we do two pushes)
  5857   // NOTE: the 64-bit code once had a bug where it did movq(addr, rax), which
  5858   //       stored rax into addr, the reverse of what was intended.
 5859   if (addr.uses(rsp)) {
 5860     lea(rax, addr);
 5861     pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
 5862   } else {
 5863     pushptr(addr);
 5864   }
 5865 
 5866   // Pass register number to verify_oop_subroutine
 5867   const char* b = nullptr;
 5868   {
 5869     ResourceMark rm;

 6316 
 6317 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
 6318   // get mirror
 6319   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 6320   load_method_holder(mirror, method);
 6321   movptr(mirror, Address(mirror, mirror_offset));
 6322   resolve_oop_handle(mirror, tmp);
 6323 }
 6324 
 6325 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
 6326   load_method_holder(rresult, rmethod);
 6327   movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
 6328 }
 6329 
 6330 void MacroAssembler::load_method_holder(Register holder, Register method) {
 6331   movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
 6332   movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
 6333   movptr(holder, Address(holder, ConstantPool::pool_holder_offset()));          // InstanceKlass*
 6334 }
 6335 
 6336 void MacroAssembler::load_metadata(Register dst, Register src) {
 6337 #ifdef _LP64
 6338   if (UseCompactObjectHeaders) {
 6339     load_narrow_klass_compact(dst, src);
 6340   } else if (UseCompressedClassPointers) {
 6341     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 6342   } else
 6343 #endif
 6344   {
 6345     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 6346   }
 6347 }
 6348 
 6349 #ifdef _LP64
 6350 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
 6351   assert(UseCompactObjectHeaders, "expect compact object headers");
 6352   movq(dst, Address(src, oopDesc::mark_offset_in_bytes()));
 6353   shrq(dst, markWord::klass_shift);
 6354 }
 6355 #endif
 6356 
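With compact object headers the narrow klass lives in the upper bits of the 64-bit mark word, so load_narrow_klass_compact is just a load plus a shift. A sketch in C++; the shift value here is an illustrative placeholder for markWord::klass_shift:

#include <cstdint>

static const int kKlassShift = 42;   // placeholder for markWord::klass_shift

static uint32_t narrow_klass_from_mark(uint64_t mark_word) {
  return (uint32_t)(mark_word >> kKlassShift);   // movq + shrq, as above
}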
 6357 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
 6358   assert_different_registers(src, tmp);
 6359   assert_different_registers(dst, tmp);
 6360 #ifdef _LP64
 6361   if (UseCompactObjectHeaders) {
 6362     load_narrow_klass_compact(dst, src);
 6363     decode_klass_not_null(dst, tmp);
 6364   } else if (UseCompressedClassPointers) {
 6365     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 6366     decode_klass_not_null(dst, tmp);
 6367   } else
 6368 #endif
 6369   {
 6370     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 6371   }
 6372 }
 6373 
 6374 void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) {
 6375   load_klass(dst, src, tmp);
 6376   movptr(dst, Address(dst, Klass::prototype_header_offset()));
 6377 }
 6378 
 6379 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
 6380   assert(!UseCompactObjectHeaders, "not with compact headers");
 6381   assert_different_registers(src, tmp);
 6382   assert_different_registers(dst, tmp);
 6383 #ifdef _LP64
 6384   if (UseCompressedClassPointers) {
 6385     encode_klass_not_null(src, tmp);
 6386     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 6387   } else
 6388 #endif
 6389     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 6390 }
 6391 
 6392 void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
 6393 #ifdef _LP64
 6394   if (UseCompactObjectHeaders) {
 6395     assert(tmp != noreg, "need tmp");
 6396     assert_different_registers(klass, obj, tmp);
 6397     load_narrow_klass_compact(tmp, obj);
 6398     cmpl(klass, tmp);

 6431   bool as_raw = (decorators & AS_RAW) != 0;
 6432   if (as_raw) {
 6433     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
 6434   } else {
 6435     bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
 6436   }
 6437 }
 6438 
 6439 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
 6440                                      Register tmp1, Register tmp2, Register tmp3) {
 6441   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 6442   decorators = AccessInternal::decorator_fixup(decorators, type);
 6443   bool as_raw = (decorators & AS_RAW) != 0;
 6444   if (as_raw) {
 6445     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 6446   } else {
 6447     bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 6448   }
 6449 }
 6450 
 6451 void MacroAssembler::flat_field_copy(DecoratorSet decorators, Register src, Register dst,
 6452                                      Register inline_layout_info) {
 6453   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 6454   bs->flat_field_copy(this, decorators, src, dst, inline_layout_info);
 6455 }
 6456 
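// Reads the payload offset of an inline klass by chasing two pointers:
// InstanceKlass -> InlineKlassFixedBlock -> 32-bit payload offset.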
 6457 void MacroAssembler::payload_offset(Register inline_klass, Register offset) {
 6458   movptr(offset, Address(inline_klass, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 6459   movl(offset, Address(offset, InlineKlass::payload_offset_offset()));
 6460 }
 6461 
 6462 void MacroAssembler::payload_addr(Register oop, Register data, Register inline_klass) {
 6463   // ((address) (void*) o) + vk->payload_offset();
 6464   Register offset = (data == oop) ? rscratch1 : data;
 6465   payload_offset(inline_klass, offset);
 6466   if (data == oop) {
 6467     addptr(data, offset);
 6468   } else {
 6469     lea(data, Address(oop, offset));
 6470   }
 6471 }
 6472 
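// Computes the address of a flat array element. The element size is stored in
// the klass layout helper as a log2 value; e.g. for 8-byte flat elements the
// log2 is 3, so the effective address is (illustrative):
//   data = array + arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT) + (index << 3)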
 6473 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass,
 6474                                                 Register index, Register data) {
 6475   assert(index != rcx, "index must differ from rcx, which holds the shift count");
 6476   assert_different_registers(array, array_klass, index);
 6477   assert_different_registers(rcx, array, index);
 6478 
 6479   // array->base() + (index << Klass::layout_helper_log2_element_size(lh));
 6480   movl(rcx, Address(array_klass, Klass::layout_helper_offset()));
 6481 
 6482   // Klass::layout_helper_log2_element_size(lh)
 6483   // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
 6484   shrl(rcx, Klass::_lh_log2_element_size_shift);
 6485   andl(rcx, Klass::_lh_log2_element_size_mask);
 6486   shlptr(index); // index << rcx
 6487 
 6488   lea(data, Address(array, index, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT)));
 6489 }
 6490 
 6491 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
 6492                                    Register thread_tmp, DecoratorSet decorators) {
 6493   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
 6494 }
 6495 
 6496 // Doesn't do verification; generates fixed-size code
 6497 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
 6498                                             Register thread_tmp, DecoratorSet decorators) {
 6499   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
 6500 }
 6501 
 6502 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
 6503                                     Register tmp2, Register tmp3, DecoratorSet decorators) {
 6504   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
 6505 }
 6506 
 6507 // Used for storing nulls.
 6508 void MacroAssembler::store_heap_oop_null(Address dst) {
 6509   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
 6510 }

 6819 
 6820 void MacroAssembler::reinit_heapbase() {
 6821   if (UseCompressedOops) {
 6822     if (Universe::heap() != nullptr) {
 6823       if (CompressedOops::base() == nullptr) {
 6824         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
 6825       } else {
 6826         mov64(r12_heapbase, (int64_t)CompressedOops::base());
 6827       }
 6828     } else {
 6829       movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr()));
 6830     }
 6831   }
 6832 }
 6833 
 6834 #endif // _LP64
 6835 
 6836 #if COMPILER2_OR_JVMCI
 6837 
 6838 // Clear 'cnt' qwords of memory starting at 'base', using XMM/YMM/ZMM registers
 6839 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, KRegister mask) {
 6840   // cnt - number of qwords (8-byte words).
 6841   // base - start address, qword aligned.
 6842   Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
 6843   bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
 6844   if (use64byteVector) {
 6845     evpbroadcastq(xtmp, val, AVX_512bit);
 6846   } else if (MaxVectorSize >= 32) {
 6847     movdq(xtmp, val);
 6848     punpcklqdq(xtmp, xtmp);
 6849     vinserti128_high(xtmp, xtmp);
 6850   } else {
 6851     movdq(xtmp, val);
 6852     punpcklqdq(xtmp, xtmp);
 6853   }
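  // At this point xtmp holds val replicated into every 64-bit lane:
  // 8 lanes for ZMM, 4 for YMM, 2 for XMM, depending on MaxVectorSize.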
 6854   jmp(L_zero_64_bytes);
 6855 
 6856   BIND(L_loop);
 6857   if (MaxVectorSize >= 32) {
 6858     fill64(base, 0, xtmp, use64byteVector);
 6859   } else {
 6860     movdqu(Address(base,  0), xtmp);
 6861     movdqu(Address(base, 16), xtmp);
 6862     movdqu(Address(base, 32), xtmp);
 6863     movdqu(Address(base, 48), xtmp);
 6864   }
 6865   addptr(base, 64);
 6866 
 6867   BIND(L_zero_64_bytes);
 6868   subptr(cnt, 8);
 6869   jccb(Assembler::greaterEqual, L_loop);
 6870 
 6871   // Clear the remaining trailing bytes (< 64)
 6872   if (use64byteVector) {
 6873     addptr(cnt, 8);
 6874     jccb(Assembler::equal, L_end);
 6875     fill64_masked(3, base, 0, xtmp, mask, cnt, val, true);
 6876     jmp(L_end);
 6877   } else {
 6878     addptr(cnt, 4);
 6879     jccb(Assembler::less, L_tail);
 6880     if (MaxVectorSize >= 32) {
 6881       vmovdqu(Address(base, 0), xtmp);
 6882     } else {
 6883       movdqu(Address(base,  0), xtmp);
 6884       movdqu(Address(base, 16), xtmp);
 6885     }
 6886   }
 6887   addptr(base, 32);
 6888   subptr(cnt, 4);
 6889 
 6890   BIND(L_tail);
 6891   addptr(cnt, 4);
 6892   jccb(Assembler::lessEqual, L_end);
 6893   if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
 6894     fill32_masked(3, base, 0, xtmp, mask, cnt, val);
 6895   } else {
 6896     decrement(cnt);
 6897 
 6898     BIND(L_sloop);
 6899     movq(Address(base, 0), xtmp);
 6900     addptr(base, 8);
 6901     decrement(cnt);
 6902     jccb(Assembler::greaterEqual, L_sloop);
 6903   }
 6904   BIND(L_end);
 6905 }
 6906 
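// On return from a method whose inline-type result is scalarized, rax either
// holds an already-buffered oop (bit 0 clear) or the InlineKlass* of the
// return type tagged with 0x01 (bit 0 set, field values in registers).
// The check below is, in effect (illustrative):
//   if (rax & 1) { allocate a buffer and run the pack handler } else { done }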
 6907 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
 6908   assert(InlineTypeReturnedAsFields, "inline types must be returned as fields here");
 6909   // An inline type might be returned. If fields are in registers we
 6910   // need to allocate an inline type instance and initialize it with
 6911   // the values of the fields.
 6912   Label skip;
 6913   // We only need a new buffered inline type if one was not already returned
 6914   testptr(rax, 1);
 6915   jcc(Assembler::zero, skip);
 6916   int call_offset = -1;
 6917 
 6918 #ifdef _LP64
 6919   // The following code is similar to allocate_instance but has some slight differences,
 6920   // e.g. the object size is never zero and is sometimes constant, and storing the klass
 6921   // pointer after allocation is unnecessary if vk != nullptr. allocate_instance is not aware of these.
 6922   Label slow_case;
 6923   // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
 6924   mov(rscratch1, rax); // save rax for slow_case since *_allocate may corrupt it when allocation fails
 6925   if (vk != nullptr) {
 6926     // Called from C1, where the return type is statically known.
 6927     movptr(rbx, (intptr_t)vk->get_InlineKlass());
 6928     jint lh = vk->layout_helper();
 6929     assert(lh != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
 6930     if (UseTLAB && !Klass::layout_helper_needs_slow_path(lh)) {
 6931       tlab_allocate(r15_thread, rax, noreg, lh, r13, r14, slow_case);
 6932     } else {
 6933       jmp(slow_case);
 6934     }
 6935   } else {
 6936     // Call from interpreter. RAX contains ((the InlineKlass* of the return type) | 0x01)
 6937     mov(rbx, rax);
 6938     andptr(rbx, -2);
 6939     if (UseTLAB) {
 6940       movl(r14, Address(rbx, Klass::layout_helper_offset()));
 6941       testl(r14, Klass::_lh_instance_slow_path_bit);
 6942       jcc(Assembler::notZero, slow_case);
 6943       tlab_allocate(r15_thread, rax, r14, 0, r13, r14, slow_case);
 6944     } else {
 6945       jmp(slow_case);
 6946     }
 6947   }
 6948   if (UseTLAB) {
 6949     // 2. Initialize buffered inline instance header
 6950     Register buffer_obj = rax;
 6951     if (UseCompactObjectHeaders) {
 6952       Register mark_word = r13;
 6953       movptr(mark_word, Address(rbx, Klass::prototype_header_offset()));
 6954       movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), mark_word);
 6955     } else {
 6956       movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), (intptr_t)markWord::inline_type_prototype().value());
 6957       xorl(r13, r13);
 6958       store_klass_gap(buffer_obj, r13);
 6959       if (vk == nullptr) {
 6960         // store_klass corrupts rbx(klass), so save it in r13 for later use (interpreter case only).
 6961         mov(r13, rbx);
 6962       }
 6963       store_klass(buffer_obj, rbx, rscratch1);
 6964     }
 6965     // 3. Initialize its fields with an inline class specific handler
 6966     if (vk != nullptr) {
 6967       call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
 6968     } else {
 6969       movptr(rbx, Address(r13, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 6970       movptr(rbx, Address(rbx, InlineKlass::pack_handler_offset()));
 6971       call(rbx);
 6972     }
 6973     jmp(skip);
 6974   }
 6975   bind(slow_case);
 6976   // We failed to allocate a new inline type; fall back to a runtime
 6977   // call. Some oop fields may be live in registers, but we can't
 6978   // tell which. The runtime call will take care of preserving them
 6979   // across a GC if one occurs.
 6980   mov(rax, rscratch1);
 6981 #endif
 6982 
 6983   if (from_interpreter) {
 6984     super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
 6985   } else {
 6986     call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
 6987     call_offset = offset();
 6988   }
 6989 
 6990   bind(skip);
 6991   return call_offset;
 6992 }
 6993 
 6994 // Move a value between registers/stack slots and update the reg_state
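// The reg_state array tracks one state per VMReg while arguments are shuffled:
//   reg_readonly - still holds a value that must be read before overwriting
//   reg_writable - free to receive a value
//   reg_written  - already holds its final value
// A move is only performed when the destination is writable; on success the
// source becomes writable and the destination is marked written.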
 6995 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
 6996   assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
 6997   if (reg_state[to->value()] == reg_written) {
 6998     return true; // Already written
 6999   }
 7000   if (from != to && bt != T_VOID) {
 7001     if (reg_state[to->value()] == reg_readonly) {
 7002       return false; // Not yet writable
 7003     }
 7004     if (from->is_reg()) {
 7005       if (to->is_reg()) {
 7006         if (from->is_XMMRegister()) {
 7007           if (bt == T_DOUBLE) {
 7008             movdbl(to->as_XMMRegister(), from->as_XMMRegister());
 7009           } else {
 7010             assert(bt == T_FLOAT, "must be float");
 7011             movflt(to->as_XMMRegister(), from->as_XMMRegister());
 7012           }
 7013         } else {
 7014           movq(to->as_Register(), from->as_Register());
 7015         }
 7016       } else {
 7017         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 7018         Address to_addr = Address(rsp, st_off);
 7019         if (from->is_XMMRegister()) {
 7020           if (bt == T_DOUBLE) {
 7021             movdbl(to_addr, from->as_XMMRegister());
 7022           } else {
 7023             assert(bt == T_FLOAT, "must be float");
 7024             movflt(to_addr, from->as_XMMRegister());
 7025           }
 7026         } else {
 7027           movq(to_addr, from->as_Register());
 7028         }
 7029       }
 7030     } else {
 7031       Address from_addr = Address(rsp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize);
 7032       if (to->is_reg()) {
 7033         if (to->is_XMMRegister()) {
 7034           if (bt == T_DOUBLE) {
 7035             movdbl(to->as_XMMRegister(), from_addr);
 7036           } else {
 7037             assert(bt == T_FLOAT, "must be float");
 7038             movflt(to->as_XMMRegister(), from_addr);
 7039           }
 7040         } else {
 7041           movq(to->as_Register(), from_addr);
 7042         }
 7043       } else {
 7044         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 7045         movq(r13, from_addr);
 7046         movq(Address(rsp, st_off), r13);
 7047       }
 7048     }
 7049   }
 7050   // Update register states
 7051   reg_state[from->value()] = reg_writable;
 7052   reg_state[to->value()] = reg_written;
 7053   return true;
 7054 }
 7055 
 7056 // Calculate the extra stack space required for packing or unpacking inline
 7057 // args and adjust the stack pointer
 7058 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
 7059   // Two additional slots to account for the return address
 7060   int sp_inc = (args_on_stack + 2) * VMRegImpl::stack_slot_size;
 7061   sp_inc = align_up(sp_inc, StackAlignmentInBytes);
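  // For example, assuming 4-byte stack slots and 16-byte stack alignment:
  // args_on_stack = 3 gives (3 + 2) * 4 = 20 bytes, aligned up to sp_inc = 32.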
 7062   // Save the return address, adjust the stack (make sure it is properly
 7063   // 16-byte aligned) and copy the return address to the new top of the stack.
 7064   // The stack will be repaired on return (see MacroAssembler::remove_frame).
 7065   assert(sp_inc > 0, "sanity");
 7066   pop(r13);
 7067   subptr(rsp, sp_inc);
 7068   push(r13);
 7069   return sp_inc;
 7070 }
 7071 
 7072 // Read all fields from an inline type buffer and store the field values in registers/stack slots.
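// A SigEntry offset of -1 denotes the IsInit slot of a nullable inline
// argument: if the buffer oop is null, IsInit is set to 0 and all oop fields
// are zeroed; otherwise IsInit is set to 1 and the fields are loaded from the
// buffer.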
 7073 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
 7074                                           VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
 7075                                           RegState reg_state[]) {
 7076   assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
 7077   assert(from->is_valid(), "source must be valid");
 7078   bool progress = false;
 7079 #ifdef ASSERT
 7080   const int start_offset = offset();
 7081 #endif
 7082 
 7083   Label L_null, L_notNull;
 7084   // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
 7085   Register tmp1 = r10;
 7086   Register tmp2 = r13;
 7087   Register fromReg = noreg;
 7088   ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, -1);
 7089   bool done = true;
 7090   bool mark_done = true;
 7091   VMReg toReg;
 7092   BasicType bt;
 7093   // Check if argument requires a null check
 7094   bool null_check = false;
 7095   VMReg nullCheckReg;
 7096   while (stream.next(nullCheckReg, bt)) {
 7097     if (sig->at(stream.sig_index())._offset == -1) {
 7098       null_check = true;
 7099       break;
 7100     }
 7101   }
 7102   stream.reset(sig_index, to_index);
 7103   while (stream.next(toReg, bt)) {
 7104     assert(toReg->is_valid(), "destination must be valid");
 7105     int idx = (int)toReg->value();
 7106     if (reg_state[idx] == reg_readonly) {
 7107       if (idx != from->value()) {
 7108         mark_done = false;
 7109       }
 7110       done = false;
 7111       continue;
 7112     } else if (reg_state[idx] == reg_written) {
 7113       continue;
 7114     }
 7115     assert(reg_state[idx] == reg_writable, "must be writable");
 7116     reg_state[idx] = reg_written;
 7117     progress = true;
 7118 
 7119     if (fromReg == noreg) {
 7120       if (from->is_reg()) {
 7121         fromReg = from->as_Register();
 7122       } else {
 7123         int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 7124         movq(tmp1, Address(rsp, st_off));
 7125         fromReg = tmp1;
 7126       }
 7127       if (null_check) {
 7128         // Nullable inline type argument, emit null check
 7129         testptr(fromReg, fromReg);
 7130         jcc(Assembler::zero, L_null);
 7131       }
 7132     }
 7133     int off = sig->at(stream.sig_index())._offset;
 7134     if (off == -1) {
 7135       assert(null_check, "Missing null check");
 7136       if (toReg->is_stack()) {
 7137         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 7138         movq(Address(rsp, st_off), 1);
 7139       } else {
 7140         movq(toReg->as_Register(), 1);
 7141       }
 7142       continue;
 7143     }
 7144     assert(off > 0, "offset in object should be positive");
 7145     Address fromAddr = Address(fromReg, off);
 7146     if (!toReg->is_XMMRegister()) {
 7147       Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
 7148       if (is_reference_type(bt)) {
 7149         load_heap_oop(dst, fromAddr);
 7150       } else {
 7151         bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
 7152         load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
 7153       }
 7154       if (toReg->is_stack()) {
 7155         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 7156         movq(Address(rsp, st_off), dst);
 7157       }
 7158     } else if (bt == T_DOUBLE) {
 7159       movdbl(toReg->as_XMMRegister(), fromAddr);
 7160     } else {
 7161       assert(bt == T_FLOAT, "must be float");
 7162       movflt(toReg->as_XMMRegister(), fromAddr);
 7163     }
 7164   }
 7165   if (progress && null_check) {
 7166     if (done) {
 7167       jmp(L_notNull);
 7168       bind(L_null);
 7169       // Set IsInit field to zero to signal that the argument is null.
 7170       // Also set all oop fields to zero to make the GC happy.
 7171       stream.reset(sig_index, to_index);
 7172       while (stream.next(toReg, bt)) {
 7173         if (sig->at(stream.sig_index())._offset == -1 ||
 7174             bt == T_OBJECT || bt == T_ARRAY) {
 7175           if (toReg->is_stack()) {
 7176             int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 7177             movq(Address(rsp, st_off), 0);
 7178           } else {
 7179             xorq(toReg->as_Register(), toReg->as_Register());
 7180           }
 7181         }
 7182       }
 7183       bind(L_notNull);
 7184     } else {
 7185       bind(L_null);
 7186     }
 7187   }
 7188 
 7189   sig_index = stream.sig_index();
 7190   to_index = stream.regs_index();
 7191 
 7192   if (mark_done && reg_state[from->value()] != reg_written) {
 7193     // This is okay because no one else will write to that slot
 7194     reg_state[from->value()] = reg_writable;
 7195   }
 7196   from_index--;
 7197   assert(progress || (start_offset == offset()), "should not emit code");
 7198   return done;
 7199 }
 7200 
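// The inverse of unpack_inline_helper: reads the scalarized field values from
// registers/stack slots and stores them into a buffered inline object that is
// loaded from val_array at index vtarg_index.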
 7201 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
 7202                                         VMRegPair* from, int from_count, int& from_index, VMReg to,
 7203                                         RegState reg_state[], Register val_array) {
 7204   assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter");
 7205   assert(to->is_valid(), "destination must be valid");
 7206 
 7207   if (reg_state[to->value()] == reg_written) {
 7208     skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
 7209     return true; // Already written
 7210   }
 7211 
 7212   // TODO 8284443 Isn't it an issue if the code below uses r14 as tmp while it contains a spilled value?
 7213   // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
 7214   Register val_obj_tmp = r11;
 7215   Register from_reg_tmp = r14;
 7216   Register tmp1 = r10;
 7217   Register tmp2 = r13;
 7218   Register tmp3 = rbx;
 7219   Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();
 7220 
 7221   assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);
 7222 
 7223   if (reg_state[to->value()] == reg_readonly) {
 7224     if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
 7225       skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
 7226       return false; // Not yet writable
 7227     }
 7228     val_obj = val_obj_tmp;
 7229   }
 7230 
 7231   int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT);
 7232   load_heap_oop(val_obj, Address(val_array, index));
 7233 
 7234   ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
 7235   VMReg fromReg;
 7236   BasicType bt;
 7237   Label L_null;
 7238   while (stream.next(fromReg, bt)) {
 7239     assert(fromReg->is_valid(), "source must be valid");
 7240     reg_state[fromReg->value()] = reg_writable;
 7241 
 7242     int off = sig->at(stream.sig_index())._offset;
 7243     if (off == -1) {
 7244       // Nullable inline type argument, emit null check
 7245       Label L_notNull;
 7246       if (fromReg->is_stack()) {
 7247         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 7248         testb(Address(rsp, ld_off), 1);
 7249       } else {
 7250         testb(fromReg->as_Register(), 1);
 7251       }
 7252       jcc(Assembler::notZero, L_notNull);
 7253       movptr(val_obj, 0);
 7254       jmp(L_null);
 7255       bind(L_notNull);
 7256       continue;
 7257     }
 7258 
 7259     assert(off > 0, "offset in object should be positive");
 7260     size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
 7261 
 7262     Address dst(val_obj, off);
 7263     if (!fromReg->is_XMMRegister()) {
 7264       Register src;
 7265       if (fromReg->is_stack()) {
 7266         src = from_reg_tmp;
 7267         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 7268         load_sized_value(src, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
 7269       } else {
 7270         src = fromReg->as_Register();
 7271       }
 7272       assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
 7273       if (is_reference_type(bt)) {
 7274         store_heap_oop(dst, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
 7275       } else {
 7276         store_sized_value(dst, src, size_in_bytes);
 7277       }
 7278     } else if (bt == T_DOUBLE) {
 7279       movdbl(dst, fromReg->as_XMMRegister());
 7280     } else {
 7281       assert(bt == T_FLOAT, "must be float");
 7282       movflt(dst, fromReg->as_XMMRegister());
 7283     }
 7284   }
 7285   bind(L_null);
 7286   sig_index = stream.sig_index();
 7287   from_index = stream.regs_index();
 7288 
 7289   assert(reg_state[to->value()] == reg_writable, "must have already been read");
 7290   bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
 7291   assert(success, "to register must be writable");
 7292   return true;
 7293 }
 7294 
 7295 VMReg MacroAssembler::spill_reg_for(VMReg reg) {
 7296   return reg->is_XMMRegister() ? xmm8->as_VMReg() : r14->as_VMReg();
 7297 }
 7298 
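// Frame layout assumed by the stack-repair path below:
//   [rsp + initial_framesize]            saved rbp
//   [rsp + initial_framesize - wordSize] stack increment (sp_inc) pushed when
//                                        the frame was extended for inline args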
 7299 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
 7300   assert((initial_framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
 7301   if (needs_stack_repair) {
 7302     movq(rbp, Address(rsp, initial_framesize));
 7303     // The stack increment resides just below the saved rbp
 7304     addq(rsp, Address(rsp, initial_framesize - wordSize));
 7305   } else {
 7306     if (initial_framesize > 0) {
 7307       addq(rsp, initial_framesize);
 7308     }
 7309     pop(rbp);
 7310   }
 7311 }
 7312 
 7313 // Clearing constant-sized memory using YMM/ZMM registers.
 7314 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 7315   assert(UseAVX > 2 && VM_Version::supports_avx512vl(), "");
 7316   bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
 7317 
 7318   int vector64_count = (cnt & (~0x7)) >> 3;
 7319   cnt = cnt & 0x7;
 7320   const int fill64_per_loop = 4;
 7321   const int max_unrolled_fill64 = 8;
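  // For example, cnt = 23 qwords splits into vector64_count = 2 (two 64-byte
  // stores) and a 7-qword tail handled by the masked stores further below.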
 7322 
 7323   // 64-byte initialization loop.
 7324   vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit);
 7325   int start64 = 0;
 7326   if (vector64_count > max_unrolled_fill64) {
 7327     Label LOOP;
 7328     Register index = rtmp;
 7329 
 7330     start64 = vector64_count - (vector64_count % fill64_per_loop);
 7331 
 7332     movl(index, 0);

 7382         break;
 7383       case 7:
 7384         if (use64byteVector) {
 7385           movl(rtmp, 0x7F);
 7386           kmovwl(mask, rtmp);
 7387           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 7388         } else {
 7389           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 7390           movl(rtmp, 0x7);
 7391           kmovwl(mask, rtmp);
 7392           evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
 7393         }
 7394         break;
 7395       default:
 7396         fatal("Unexpected length: %d\n", cnt);
 7397         break;
 7398     }
 7399   }
 7400 }
 7401 
 7402 void MacroAssembler::clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp,
 7403                                bool is_large, bool word_copy_only, KRegister mask) {
 7404   // cnt      - number of qwords (8-byte words).
 7405   // base     - start address, qword aligned.
 7406   // is_large - set if the optimizer knows cnt is larger than InitArrayShortSize
 7407   assert(base == rdi, "base register must be rdi for rep stos");
 7408   assert(val == rax,  "val register must be rax for rep stos");
 7409   assert(cnt == rcx,  "cnt register must be rcx for rep stos");
 7410   assert(InitArrayShortSize % BytesPerLong == 0,
 7411     "InitArrayShortSize should be a multiple of BytesPerLong");
 7412 
 7413   Label DONE;
 7414 
 7415   if (!is_large) {
 7416     Label LOOP, LONG;
 7417     cmpptr(cnt, InitArrayShortSize/BytesPerLong);
 7418     jccb(Assembler::greater, LONG);
 7419 
 7420     NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
 7421 
 7422     decrement(cnt);
 7423     jccb(Assembler::negative, DONE); // Zero length
 7424 
 7425     // Use individual pointer-sized stores for small counts:
 7426     BIND(LOOP);
 7427     movptr(Address(base, cnt, Address::times_ptr), val);
 7428     decrement(cnt);
 7429     jccb(Assembler::greaterEqual, LOOP);
 7430     jmpb(DONE);
 7431 
 7432     BIND(LONG);
 7433   }
 7434 
 7435   // Use longer rep-prefixed ops for non-small counts:
 7436   if (UseFastStosb && !word_copy_only) {
 7437     shlptr(cnt, 3); // convert to number of bytes
 7438     rep_stosb();
 7439   } else if (UseXMMForObjInit) {
 7440     xmm_clear_mem(base, cnt, val, xtmp, mask);
 7441   } else {
 7442     NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
 7443     rep_stos();
 7444   }
 7445 
 7446   BIND(DONE);
 7447 }
 7448 
 7449 #endif //COMPILER2_OR_JVMCI
 7450 
 7451 
 7452 void MacroAssembler::generate_fill(BasicType t, bool aligned,
 7453                                    Register to, Register value, Register count,
 7454                                    Register rtmp, XMMRegister xtmp) {
 7455   ShortBranchVerifier sbv(this);
 7456   assert_different_registers(to, value, count, rtmp);
 7457   Label L_exit;
 7458   Label L_fill_2_bytes, L_fill_4_bytes;
 7459 
 7460 #if defined(COMPILER2) && defined(_LP64)

11541 
11542   // Load top.
11543   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
11544 
11545   // Check if the lock-stack is full.
11546   cmpl(top, LockStack::end_offset());
11547   jcc(Assembler::greaterEqual, slow);
11548 
11549   // Check for recursion.
11550   cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
11551   jcc(Assembler::equal, push);
11552 
11553   // Check header for monitor (0b10).
11554   testptr(reg_rax, markWord::monitor_value);
11555   jcc(Assembler::notZero, slow);
11556 
11557   // Try to lock. Transition lock bits 0b01 => 0b00
11558   movptr(tmp, reg_rax);
11559   andptr(tmp, ~(int32_t)markWord::unlocked_value);
11560   orptr(reg_rax, markWord::unlocked_value);
11561   if (EnableValhalla) {
11562     // Mask inline_type bit such that we go to the slow path if object is an inline type
11563     andptr(reg_rax, ~((int) markWord::inline_type_bit_in_place));
11564   }
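  // cmpxchgptr compares rax (the expected unlocked mark) with the mark word
  // and installs tmp (the locked mark, lock bits 0b00) on success; on failure
  // rax receives the current mark and we take the slow path.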
11565   lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
11566   jcc(Assembler::notEqual, slow);
11567 
11568   // Restore top, CAS clobbers register.
11569   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
11570 
11571   bind(push);
11572   // After successful lock, push object on lock-stack.
11573   movptr(Address(thread, top), obj);
11574   incrementl(top, oopSize);
11575   movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
11576 }
11577 
11578 // Implements lightweight-unlocking.
11579 //
11580 // obj: the object to be unlocked
11581 // reg_rax: rax
11582 // thread: the thread
11583 // tmp: a temporary register
11584 void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {