src/hotspot/cpu/x86/macroAssembler_x86.cpp

--- old/src/hotspot/cpu/x86/macroAssembler_x86.cpp

   10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   12  * version 2 for more details (a copy is included in the LICENSE file that
   13  * accompanied this code).
   14  *
   15  * You should have received a copy of the GNU General Public License version
   16  * 2 along with this work; if not, write to the Free Software Foundation,
   17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   18  *
   19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   20  * or visit www.oracle.com if you need additional information or have any
   21  * questions.
   22  *
   23  */
   24 
   25 #include "precompiled.hpp"
   26 #include "asm/assembler.hpp"
   27 #include "asm/assembler.inline.hpp"
   28 #include "compiler/compiler_globals.hpp"
   29 #include "compiler/disassembler.hpp"
   30 #include "crc32c.h"
   31 #include "gc/shared/barrierSet.hpp"
   32 #include "gc/shared/barrierSetAssembler.hpp"
   33 #include "gc/shared/collectedHeap.inline.hpp"
   34 #include "gc/shared/tlab_globals.hpp"
   35 #include "interpreter/bytecodeHistogram.hpp"
   36 #include "interpreter/interpreter.hpp"
   37 #include "jvm.h"
   38 #include "memory/resourceArea.hpp"
   39 #include "memory/universe.hpp"
   40 #include "oops/accessDecorators.hpp"
   41 #include "oops/compressedOops.inline.hpp"
   42 #include "oops/klass.inline.hpp"
   43 #include "prims/methodHandles.hpp"
   44 #include "runtime/continuation.hpp"
   45 #include "runtime/interfaceSupport.inline.hpp"
   46 #include "runtime/javaThread.hpp"
   47 #include "runtime/jniHandles.hpp"
   48 #include "runtime/objectMonitor.hpp"
   49 #include "runtime/os.hpp"
   50 #include "runtime/safepoint.hpp"
   51 #include "runtime/safepointMechanism.hpp"
   52 #include "runtime/sharedRuntime.hpp"
   53 #include "runtime/stubRoutines.hpp"
   54 #include "utilities/macros.hpp"
   55 
   56 #ifdef PRODUCT
   57 #define BLOCK_COMMENT(str) /* nothing */
   58 #define STOP(error) stop(error)
   59 #else
   60 #define BLOCK_COMMENT(str) block_comment(str)
   61 #define STOP(error) block_comment(error); stop(error)
   62 #endif
   63 
   64 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
   65 
   66 #ifdef ASSERT
   67 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
   68 #endif
   69 
   70 static const Assembler::Condition reverse[] = {
   71     Assembler::noOverflow     /* overflow      = 0x0 */ ,
   72     Assembler::overflow       /* noOverflow    = 0x1 */ ,
   73     Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
   74     Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,

 1670   LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
 1671   pass_arg1(this, arg_1);
 1672   pass_arg0(this, arg_0);
 1673   call_VM_leaf(entry_point, 3);
 1674 }
 1675 
 1676 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
 1677   LP64_ONLY(assert(arg_0 != c_rarg3, "smashed arg"));
 1678   LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
 1679   LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
 1680   pass_arg3(this, arg_3);
 1681   LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
 1682   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
 1683   pass_arg2(this, arg_2);
 1684   LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
 1685   pass_arg1(this, arg_1);
 1686   pass_arg0(this, arg_0);
 1687   call_VM_leaf(entry_point, 3);
 1688 }
 1689 
 1690 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
 1691   pass_arg0(this, arg_0);
 1692   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1693 }
 1694 
 1695 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
 1696 
 1697   LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
 1698   pass_arg1(this, arg_1);
 1699   pass_arg0(this, arg_0);
 1700   MacroAssembler::call_VM_leaf_base(entry_point, 2);
 1701 }
 1702 
 1703 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1704   LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
 1705   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
 1706   pass_arg2(this, arg_2);
 1707   LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
 1708   pass_arg1(this, arg_1);
 1709   pass_arg0(this, arg_0);

 2849     lea(rscratch, src);
 2850     Assembler::mulss(dst, Address(rscratch, 0));
 2851   }
 2852 }
 2853 
 2854 void MacroAssembler::null_check(Register reg, int offset) {
 2855   if (needs_explicit_null_check(offset)) {
 2856     // provoke OS null exception if reg is null by
 2857     // accessing M[reg] w/o changing any (non-CC) registers
 2858     // NOTE: cmpl is plenty here to provoke a segv
 2859     cmpptr(rax, Address(reg, 0));
 2860     // Note: should probably use testl(rax, Address(reg, 0));
 2861     //       may be shorter code (however, this version of
 2862     //       testl needs to be implemented first)
 2863   } else {
 2864     // nothing to do, (later) access of M[reg + offset]
 2865     // will provoke OS null exception if reg is null
 2866   }
 2867 }
 2868 
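For orientation: whether a null check can stay implicit comes down to whether [reg + offset] still lands in the OS-protected region around address zero. A minimal standalone sketch of that decision, assuming a single protected region of known size (the real needs_explicit_null_check also accounts for compressed-oops heap layout):

#include <cstddef>

// Sketch only: an access at [reg + offset] faults reliably for a null reg
// only while the effective address stays inside the protected region that
// starts at address zero (typically at least one OS page).
static bool needs_explicit_null_check_sketch(ptrdiff_t offset,
                                             size_t protected_bytes) {
  // Negative offsets wrap below address zero; offsets beyond the guard
  // region may hit mapped memory and never fault. Both cases need an
  // explicit null test instead of relying on the hardware trap.
  return offset < 0 || static_cast<size_t>(offset) >= protected_bytes;
}
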
 2869 void MacroAssembler::os_breakpoint() {
 2870   // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
 2871   // (e.g., MSVC can't call ps() otherwise)
 2872   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 2873 }
 2874 
 2875 void MacroAssembler::unimplemented(const char* what) {
 2876   const char* buf = nullptr;
 2877   {
 2878     ResourceMark rm;
 2879     stringStream ss;
 2880     ss.print("unimplemented: %s", what);
 2881     buf = code_string(ss.as_string());
 2882   }
 2883   stop(buf);
 2884 }
 2885 
 2886 #ifdef _LP64
 2887 #define XSTATE_BV 0x200
 2888 #endif

 3953 }
 3954 
 3955 // C++ bool manipulation
 3956 void MacroAssembler::testbool(Register dst) {
 3957   if(sizeof(bool) == 1)
 3958     testb(dst, 0xff);
 3959   else if(sizeof(bool) == 2) {
 3960     // testw implementation needed for two byte bools
 3961     ShouldNotReachHere();
 3962   } else if(sizeof(bool) == 4)
 3963     testl(dst, dst);
 3964   else
 3965     // unsupported
 3966     ShouldNotReachHere();
 3967 }
 3968 
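The byte test matters because a C++ bool read from raw memory may hold any byte value, not just 0 or 1, so testing all eight bits classifies every nonzero byte as true. A small standalone illustration (hypothetical byte value, not HotSpot code):

#include <cstdio>
#include <cstring>

int main() {
  unsigned char raw = 0x40;   // a bool-sized byte that is neither 0 nor 1
  bool b;
  std::memcpy(&b, &raw, 1);   // simulate a bool produced by native code
  unsigned char byte;
  std::memcpy(&byte, &b, 1);  // inspect the raw byte, as testb(dst, 0xff) does
  std::printf("byte 0x%02x -> %s\n", byte, (byte & 0xff) ? "true" : "false");
  return 0;
}
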
 3969 void MacroAssembler::testptr(Register dst, Register src) {
 3970   LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
 3971 }
 3972 
 3973 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
 3974 void MacroAssembler::tlab_allocate(Register thread, Register obj,
 3975                                    Register var_size_in_bytes,
 3976                                    int con_size_in_bytes,
 3977                                    Register t1,
 3978                                    Register t2,
 3979                                    Label& slow_case) {
 3980   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 3981   bs->tlab_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
 3982 }
 3983 
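The delegated tlab_allocate is, at its core, bump-pointer allocation in the thread-local allocation buffer. A minimal sketch of that fast path in plain C++, with simplified fields (the real code is emitted by the BarrierSetAssembler):

#include <cstddef>
#include <cstdint>

struct TlabSketch {
  uint8_t* top;   // next free byte in the buffer
  uint8_t* end;   // one past the last usable byte
};

// Returns the object start, or nullptr to signal "take the slow path".
static void* tlab_allocate_sketch(TlabSketch& tlab, size_t size_in_bytes) {
  uint8_t* obj     = tlab.top;
  uint8_t* new_top = obj + size_in_bytes;
  if (new_top > tlab.end) {
    return nullptr;          // slow_case: refill the TLAB or allocate in the heap
  }
  tlab.top = new_top;        // bump the pointer; no synchronization is needed
  return obj;                // because the buffer is thread-local
}
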
 3984 RegSet MacroAssembler::call_clobbered_gp_registers() {
 3985   RegSet regs;
 3986 #ifdef _LP64
 3987   regs += RegSet::of(rax, rcx, rdx);
 3988 #ifndef WINDOWS
 3989   regs += RegSet::of(rsi, rdi);
 3990 #endif
 3991   regs += RegSet::range(r8, r11);
 3992 #else

 4205     // clear topmost word (no jump would be needed if conditional assignment worked here)
 4206     movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp);
 4207     // index could be 0 now, must check again
 4208     jcc(Assembler::zero, done);
 4209     bind(even);
 4210   }
 4211 #endif // !_LP64
 4212   // initialize remaining object fields: index is a multiple of 2 now
 4213   {
 4214     Label loop;
 4215     bind(loop);
 4216     movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
 4217     NOT_LP64(movptr(Address(address, index, Address::times_8, offset_in_bytes - 2*BytesPerWord), temp);)
 4218     decrement(index);
 4219     jcc(Assembler::notZero, loop);
 4220   }
 4221 
 4222   bind(done);
 4223 }
 4224 
 4225 // Look up the method for a megamorphic invokeinterface call.
 4226 // The target method is determined by <intf_klass, itable_index>.
 4227 // The receiver klass is in recv_klass.
 4228 // On success, the result will be in method_result, and execution falls through.
 4229 // On failure, execution transfers to the given label.
 4230 void MacroAssembler::lookup_interface_method(Register recv_klass,
 4231                                              Register intf_klass,
 4232                                              RegisterOrConstant itable_index,
 4233                                              Register method_result,
 4234                                              Register scan_temp,
 4235                                              Label& L_no_such_interface,
 4236                                              bool return_method) {
 4237   assert_different_registers(recv_klass, intf_klass, scan_temp);
 4238   assert_different_registers(method_result, intf_klass, scan_temp);
 4239   assert(recv_klass != method_result || !return_method,
 4240          "recv_klass can be destroyed when method isn't needed");
 4241 
 4242   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
 4243          "caller must use same register for non-constant itable index as for method");
 4244 

 4553   } else {
 4554     Label L;
 4555     jccb(negate_condition(cc), L);
 4556     movl(dst, src);
 4557     bind(L);
 4558   }
 4559 }
 4560 
 4561 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
 4562   if (VM_Version::supports_cmov()) {
 4563     cmovl(cc, dst, src);
 4564   } else {
 4565     Label L;
 4566     jccb(negate_condition(cc), L);
 4567     movl(dst, src);
 4568     bind(L);
 4569   }
 4570 }
 4571 
 4572 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
 4573   if (!VerifyOops) return;
 4574 
 4575   BLOCK_COMMENT("verify_oop {");
 4576 #ifdef _LP64
 4577   push(rscratch1);
 4578 #endif
 4579   push(rax);                          // save rax
 4580   push(reg);                          // pass register argument
 4581 
 4582   // Pass register number to verify_oop_subroutine
 4583   const char* b = nullptr;
 4584   {
 4585     ResourceMark rm;
 4586     stringStream ss;
 4587     ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
 4588     b = code_string(ss.as_string());
 4589   }
 4590   ExternalAddress buffer((address) b);
 4591   pushptr(buffer.addr(), rscratch1);
 4592 
 4593   // call indirectly to solve generation ordering problem

 4615   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
 4616   int stackElementSize = Interpreter::stackElementSize;
 4617   int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
 4618 #ifdef ASSERT
 4619   int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
 4620   assert(offset1 - offset == stackElementSize, "correct arithmetic");
 4621 #endif
 4622   Register             scale_reg    = noreg;
 4623   Address::ScaleFactor scale_factor = Address::no_scale;
 4624   if (arg_slot.is_constant()) {
 4625     offset += arg_slot.as_constant() * stackElementSize;
 4626   } else {
 4627     scale_reg    = arg_slot.as_register();
 4628     scale_factor = Address::times(stackElementSize);
 4629   }
 4630   offset += wordSize;           // return PC is on stack
 4631   return Address(rsp, scale_reg, scale_factor, offset);
 4632 }
 4633 
 4634 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
 4635   if (!VerifyOops) return;
 4636 
 4637 #ifdef _LP64
 4638   push(rscratch1);
 4639 #endif
 4640   push(rax); // save rax
 4641   // addr may contain rsp so we will have to adjust it based on the push
 4642   // we just did (and on 64 bit we do two pushes)
 4643   // NOTE: the 64-bit code used to have a bug here: it did movq(addr, rax), which
 4644   //       stores rax into addr, the reverse of what was intended.
 4645   if (addr.uses(rsp)) {
 4646     lea(rax, addr);
 4647     pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
 4648   } else {
 4649     pushptr(addr);
 4650   }
 4651 
 4652   // Pass register number to verify_oop_subroutine
 4653   const char* b = nullptr;
 4654   {
 4655     ResourceMark rm;

 5102 
 5103 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
 5104   // get mirror
 5105   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 5106   load_method_holder(mirror, method);
 5107   movptr(mirror, Address(mirror, mirror_offset));
 5108   resolve_oop_handle(mirror, tmp);
 5109 }
 5110 
 5111 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
 5112   load_method_holder(rresult, rmethod);
 5113   movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
 5114 }
 5115 
 5116 void MacroAssembler::load_method_holder(Register holder, Register method) {
 5117   movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
 5118   movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
 5119   movptr(holder, Address(holder, ConstantPool::pool_holder_offset()));          // InstanceKlass*
 5120 }
 5121 
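load_method_holder is three dependent loads: Method* to ConstMethod* to ConstantPool* to the pool-holder InstanceKlass*. In C++ terms the chain looks roughly like this (hypothetical struct and field names mirroring the comments, not the real declarations):

struct InstanceKlassSketch;
struct ConstantPoolSketch { InstanceKlassSketch* pool_holder; };
struct ConstMethodSketch  { ConstantPoolSketch*  constants; };
struct MethodSketch       { ConstMethodSketch*   const_method; };

// Three dependent loads, matching the three movptr() instructions above.
static InstanceKlassSketch* method_holder_sketch(MethodSketch* m) {
  return m->const_method->constants->pool_holder;
}
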
 5122 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
 5123   assert_different_registers(src, tmp);
 5124   assert_different_registers(dst, tmp);
 5125 #ifdef _LP64
 5126   if (UseCompressedClassPointers) {
 5127     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5128     decode_klass_not_null(dst, tmp);
 5129   } else
 5130 #endif
 5131     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5132 }
 5133 
 5134 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
 5135   assert_different_registers(src, tmp);
 5136   assert_different_registers(dst, tmp);
 5137 #ifdef _LP64
 5138   if (UseCompressedClassPointers) {
 5139     encode_klass_not_null(src, tmp);
 5140     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5141   } else
 5142 #endif
 5143     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5144 }
 5145 
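With UseCompressedClassPointers the klass field is a 32-bit value, and encode_klass_not_null/decode_klass_not_null translate between it and a full pointer via a base and shift. A sketch of that mapping with placeholder base and shift values (the real ones are chosen at VM startup):

#include <cstdint>

static const uintptr_t klass_base  = 0x800000000ULL;  // placeholder value
static const unsigned  klass_shift = 3;               // placeholder value

static uint32_t encode_klass_sketch(uintptr_t klass_ptr) {
  return static_cast<uint32_t>((klass_ptr - klass_base) >> klass_shift);
}

static uintptr_t decode_klass_sketch(uint32_t narrow_klass) {
  return klass_base + (static_cast<uintptr_t>(narrow_klass) << klass_shift);
}
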
 5146 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
 5147                                     Register tmp1, Register thread_tmp) {
 5148   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5149   decorators = AccessInternal::decorator_fixup(decorators, type);
 5150   bool as_raw = (decorators & AS_RAW) != 0;
 5151   if (as_raw) {
 5152     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
 5153   } else {
 5154     bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
 5155   }
 5156 }
 5157 
 5158 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
 5159                                      Register tmp1, Register tmp2, Register tmp3) {
 5160   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5161   decorators = AccessInternal::decorator_fixup(decorators, type);
 5162   bool as_raw = (decorators & AS_RAW) != 0;
 5163   if (as_raw) {
 5164     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5165   } else {
 5166     bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5167   }
 5168 }
 5169 
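The AS_RAW split is the usual qualified-call idiom: a raw access invokes the BarrierSetAssembler base implementation with static binding, skipping any GC-specific override. A minimal sketch of that dispatch pattern:

struct BarrierSetAssemblerSketch {
  virtual ~BarrierSetAssemblerSketch() {}
  virtual void load_at() { /* plain load, no GC barrier */ }
};

struct GCBarrierSketch : BarrierSetAssemblerSketch {
  void load_at() override { /* GC pre/post barrier around the load */ }
};

static void access_load_sketch(BarrierSetAssemblerSketch* bs, bool as_raw) {
  if (as_raw) {
    bs->BarrierSetAssemblerSketch::load_at();  // qualified call: static binding
  } else {
    bs->load_at();                             // virtual dispatch to the GC-specific code
  }
}
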
 5170 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
 5171                                    Register thread_tmp, DecoratorSet decorators) {
 5172   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
 5173 }
 5174 
 5175 // Doesn't do verification, generates fixed size code
 5176 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
 5177                                             Register thread_tmp, DecoratorSet decorators) {
 5178   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
 5179 }
 5180 
 5181 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
 5182                                     Register tmp2, Register tmp3, DecoratorSet decorators) {
 5183   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
 5184 }
 5185 
 5186 // Used for storing nulls.
 5187 void MacroAssembler::store_heap_oop_null(Address dst) {
 5188   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
 5189 }

 5489 
 5490 void MacroAssembler::reinit_heapbase() {
 5491   if (UseCompressedOops) {
 5492     if (Universe::heap() != nullptr) {
 5493       if (CompressedOops::base() == nullptr) {
 5494         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
 5495       } else {
 5496         mov64(r12_heapbase, (int64_t)CompressedOops::ptrs_base());
 5497       }
 5498     } else {
 5499       movptr(r12_heapbase, ExternalAddress(CompressedOops::ptrs_base_addr()));
 5500     }
 5501   }
 5502 }
 5503 
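reinit_heapbase keeps r12 holding the compressed-oops base so that a decode is a single shift-and-add; with a null base the heap is zero-based and r12 simply holds zero. A one-function sketch of the decode (shift is 0 or 3 in practice, depending on heap size):

#include <cstdint>

// oop = heap_base + (narrow << shift); for a zero-based heap, heap_base == 0
// and the add contributes nothing.
static uintptr_t decode_oop_sketch(uint32_t narrow_oop, uintptr_t heap_base,
                                   unsigned shift) {
  return heap_base + (static_cast<uintptr_t>(narrow_oop) << shift);
}
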
 5504 #endif // _LP64
 5505 
 5506 #if COMPILER2_OR_JVMCI
 5507 
 5508 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers
 5509 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 5510   // cnt - number of qwords (8-byte words).
 5511   // base - start address, qword aligned.
 5512   Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
 5513   bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
 5514   if (use64byteVector) {
 5515     vpxor(xtmp, xtmp, xtmp, AVX_512bit);
 5516   } else if (MaxVectorSize >= 32) {
 5517     vpxor(xtmp, xtmp, xtmp, AVX_256bit);
 5518   } else {
 5519     pxor(xtmp, xtmp);
 5520   }
 5521   jmp(L_zero_64_bytes);
 5522 
 5523   BIND(L_loop);
 5524   if (MaxVectorSize >= 32) {
 5525     fill64(base, 0, xtmp, use64byteVector);
 5526   } else {
 5527     movdqu(Address(base,  0), xtmp);
 5528     movdqu(Address(base, 16), xtmp);
 5529     movdqu(Address(base, 32), xtmp);
 5530     movdqu(Address(base, 48), xtmp);
 5531   }
 5532   addptr(base, 64);
 5533 
 5534   BIND(L_zero_64_bytes);
 5535   subptr(cnt, 8);
 5536   jccb(Assembler::greaterEqual, L_loop);
 5537 
 5538   // Copy trailing 64 bytes
 5539   if (use64byteVector) {
 5540     addptr(cnt, 8);
 5541     jccb(Assembler::equal, L_end);
 5542     fill64_masked(3, base, 0, xtmp, mask, cnt, rtmp, true);
 5543     jmp(L_end);
 5544   } else {
 5545     addptr(cnt, 4);
 5546     jccb(Assembler::less, L_tail);
 5547     if (MaxVectorSize >= 32) {
 5548       vmovdqu(Address(base, 0), xtmp);
 5549     } else {
 5550       movdqu(Address(base,  0), xtmp);
 5551       movdqu(Address(base, 16), xtmp);
 5552     }
 5553   }
 5554   addptr(base, 32);
 5555   subptr(cnt, 4);
 5556 
 5557   BIND(L_tail);
 5558   addptr(cnt, 4);
 5559   jccb(Assembler::lessEqual, L_end);
 5560   if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
 5561     fill32_masked(3, base, 0, xtmp, mask, cnt, rtmp);
 5562   } else {
 5563     decrement(cnt);
 5564 
 5565     BIND(L_sloop);
 5566     movq(Address(base, 0), xtmp);
 5567     addptr(base, 8);
 5568     decrement(cnt);
 5569     jccb(Assembler::greaterEqual, L_sloop);
 5570   }
 5571   BIND(L_end);
 5572 }
 5573 
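The control flow above (a 64-byte main loop, one optional 32-byte step, then an 8-byte tail) is easier to see in scalar form. A C++ sketch of the same structure over a qword count, with plain stores standing in for the SIMD instructions:

#include <cstddef>
#include <cstdint>

static void clear_qwords_sketch(uint64_t* base, ptrdiff_t cnt /* qwords */) {
  while (cnt >= 8) {                  // main loop: 64 bytes per iteration
    for (int i = 0; i < 8; i++) base[i] = 0;
    base += 8;
    cnt  -= 8;
  }
  if (cnt >= 4) {                     // one 32-byte step
    for (int i = 0; i < 4; i++) base[i] = 0;
    base += 4;
    cnt  -= 4;
  }
  while (cnt-- > 0) {                 // 8-byte tail
    *base++ = 0;
  }
}
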

 5574 // Clearing constant sized memory using YMM/ZMM registers.
 5575 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 5576   assert(UseAVX > 2 && VM_Version::supports_avx512vlbw(), "");
 5577   bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
 5578 
 5579   int vector64_count = (cnt & (~0x7)) >> 3;
 5580   cnt = cnt & 0x7;
 5581   const int fill64_per_loop = 4;
 5582   const int max_unrolled_fill64 = 8;
 5583 
 5584   // 64 byte initialization loop.
 5585   vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit);
 5586   int start64 = 0;
 5587   if (vector64_count > max_unrolled_fill64) {
 5588     Label LOOP;
 5589     Register index = rtmp;
 5590 
 5591     start64 = vector64_count - (vector64_count % fill64_per_loop);
 5592 
 5593     movl(index, 0);

 5643         break;
 5644       case 7:
 5645         if (use64byteVector) {
 5646           movl(rtmp, 0x7F);
 5647           kmovwl(mask, rtmp);
 5648           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 5649         } else {
 5650           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 5651           movl(rtmp, 0x7);
 5652           kmovwl(mask, rtmp);
 5653           evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
 5654         }
 5655         break;
 5656       default:
 5657         fatal("Unexpected length: %d\n", cnt);
 5658         break;
 5659     }
 5660   }
 5661 }
 5662 
 5663 void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp, XMMRegister xtmp,
 5664                                bool is_large, KRegister mask) {
 5665   // cnt      - number of qwords (8-byte words).
 5666   // base     - start address, qword aligned.
 5667   // is_large - if optimizers know cnt is larger than InitArrayShortSize
 5668   assert(base==rdi, "base register must be edi for rep stos");
 5669   assert(tmp==rax,   "tmp register must be eax for rep stos");
 5670   assert(cnt==rcx,   "cnt register must be ecx for rep stos");
 5671   assert(InitArrayShortSize % BytesPerLong == 0,
 5672     "InitArrayShortSize should be the multiple of BytesPerLong");
 5673 
 5674   Label DONE;
 5675   if (!is_large || !UseXMMForObjInit) {
 5676     xorptr(tmp, tmp);
 5677   }
 5678 
 5679   if (!is_large) {
 5680     Label LOOP, LONG;
 5681     cmpptr(cnt, InitArrayShortSize/BytesPerLong);
 5682     jccb(Assembler::greater, LONG);
 5683 
 5684     NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
 5685 
 5686     decrement(cnt);
 5687     jccb(Assembler::negative, DONE); // Zero length
 5688 
 5689     // Use individual pointer-sized stores for small counts:
 5690     BIND(LOOP);
 5691     movptr(Address(base, cnt, Address::times_ptr), tmp);
 5692     decrement(cnt);
 5693     jccb(Assembler::greaterEqual, LOOP);
 5694     jmpb(DONE);
 5695 
 5696     BIND(LONG);
 5697   }
 5698 
 5699   // Use longer rep-prefixed ops for non-small counts:
 5700   if (UseFastStosb) {
 5701     shlptr(cnt, 3); // convert to number of bytes
 5702     rep_stosb();
 5703   } else if (UseXMMForObjInit) {
 5704     xmm_clear_mem(base, cnt, tmp, xtmp, mask);
 5705   } else {
 5706     NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
 5707     rep_stos();
 5708   }
 5709 
 5710   BIND(DONE);
 5711 }
 5712 
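Summarizing the strategy selection in clear_mem: short known counts get an unrolled store loop, and larger counts use rep stosb, the XMM path, or rep stos. A compact sketch of the decision, with the threshold and flags as placeholders for InitArrayShortSize, UseFastStosb, and UseXMMForObjInit:

#include <cstddef>
#include <cstdint>
#include <cstring>

static void clear_mem_sketch(uint64_t* base, size_t cnt_qwords,
                             bool is_large, bool use_fast_stosb) {
  const size_t short_qwords = 8;  // placeholder for InitArrayShortSize / BytesPerLong
  if (!is_large && cnt_qwords <= short_qwords) {
    for (size_t i = 0; i < cnt_qwords; i++) base[i] = 0;  // short: store loop
    return;
  }
  if (use_fast_stosb) {
    std::memset(base, 0, cnt_qwords * 8);  // models 'rep stosb' over bytes
  } else {
    // models 'rep stos' over words (or the xmm_clear_mem vector path)
    for (size_t i = 0; i < cnt_qwords; i++) base[i] = 0;
  }
}
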
 5713 #endif //COMPILER2_OR_JVMCI
 5714 
 5715 
 5716 void MacroAssembler::generate_fill(BasicType t, bool aligned,
 5717                                    Register to, Register value, Register count,
 5718                                    Register rtmp, XMMRegister xtmp) {
 5719   ShortBranchVerifier sbv(this);
 5720   assert_different_registers(to, value, count, rtmp);
 5721   Label L_exit;
 5722   Label L_fill_2_bytes, L_fill_4_bytes;
 5723 
 5724 #if defined(COMPILER2) && defined(_LP64)

+++ new/src/hotspot/cpu/x86/macroAssembler_x86.cpp

   10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   12  * version 2 for more details (a copy is included in the LICENSE file that
   13  * accompanied this code).
   14  *
   15  * You should have received a copy of the GNU General Public License version
   16  * 2 along with this work; if not, write to the Free Software Foundation,
   17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   18  *
   19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   20  * or visit www.oracle.com if you need additional information or have any
   21  * questions.
   22  *
   23  */
   24 
   25 #include "precompiled.hpp"
   26 #include "asm/assembler.hpp"
   27 #include "asm/assembler.inline.hpp"
   28 #include "compiler/compiler_globals.hpp"
   29 #include "compiler/disassembler.hpp"
   30 #include "ci/ciInlineKlass.hpp"
   31 #include "crc32c.h"
   32 #include "gc/shared/barrierSet.hpp"
   33 #include "gc/shared/barrierSetAssembler.hpp"
   34 #include "gc/shared/collectedHeap.inline.hpp"
   35 #include "gc/shared/tlab_globals.hpp"
   36 #include "interpreter/bytecodeHistogram.hpp"
   37 #include "interpreter/interpreter.hpp"
   38 #include "jvm.h"
   39 #include "memory/resourceArea.hpp"
   40 #include "memory/universe.hpp"
   41 #include "oops/accessDecorators.hpp"
   42 #include "oops/compressedOops.inline.hpp"
   43 #include "oops/klass.inline.hpp"
   44 #include "prims/methodHandles.hpp"
   45 #include "runtime/continuation.hpp"
   46 #include "runtime/interfaceSupport.inline.hpp"
   47 #include "runtime/javaThread.hpp"
   48 #include "runtime/jniHandles.hpp"
   49 #include "runtime/objectMonitor.hpp"
   50 #include "runtime/os.hpp"
   51 #include "runtime/safepoint.hpp"
   52 #include "runtime/safepointMechanism.hpp"
   53 #include "runtime/sharedRuntime.hpp"
   54 #include "runtime/signature_cc.hpp"
   55 #include "runtime/stubRoutines.hpp"
   56 #include "utilities/macros.hpp"
   57 #include "vmreg_x86.inline.hpp"
   58 #ifdef COMPILER2
   59 #include "opto/output.hpp"
   60 #endif
   61 
   62 #ifdef PRODUCT
   63 #define BLOCK_COMMENT(str) /* nothing */
   64 #define STOP(error) stop(error)
   65 #else
   66 #define BLOCK_COMMENT(str) block_comment(str)
   67 #define STOP(error) block_comment(error); stop(error)
   68 #endif
   69 
   70 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
   71 
   72 #ifdef ASSERT
   73 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
   74 #endif
   75 
   76 static const Assembler::Condition reverse[] = {
   77     Assembler::noOverflow     /* overflow      = 0x0 */ ,
   78     Assembler::overflow       /* noOverflow    = 0x1 */ ,
   79     Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
   80     Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,

 1676   LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
 1677   pass_arg1(this, arg_1);
 1678   pass_arg0(this, arg_0);
 1679   call_VM_leaf(entry_point, 3);
 1680 }
 1681 
 1682 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
 1683   LP64_ONLY(assert(arg_0 != c_rarg3, "smashed arg"));
 1684   LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
 1685   LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
 1686   pass_arg3(this, arg_3);
 1687   LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
 1688   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
 1689   pass_arg2(this, arg_2);
 1690   LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
 1691   pass_arg1(this, arg_1);
 1692   pass_arg0(this, arg_0);
 1693   call_VM_leaf(entry_point, 3);
 1694 }
 1695 
 1696 void MacroAssembler::super_call_VM_leaf(address entry_point) {
 1697   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1698 }
 1699 
 1700 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
 1701   pass_arg0(this, arg_0);
 1702   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1703 }
 1704 
 1705 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
 1706 
 1707   LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
 1708   pass_arg1(this, arg_1);
 1709   pass_arg0(this, arg_0);
 1710   MacroAssembler::call_VM_leaf_base(entry_point, 2);
 1711 }
 1712 
 1713 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1714   LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
 1715   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
 1716   pass_arg2(this, arg_2);
 1717   LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
 1718   pass_arg1(this, arg_1);
 1719   pass_arg0(this, arg_0);

 2859     lea(rscratch, src);
 2860     Assembler::mulss(dst, Address(rscratch, 0));
 2861   }
 2862 }
 2863 
 2864 void MacroAssembler::null_check(Register reg, int offset) {
 2865   if (needs_explicit_null_check(offset)) {
 2866     // provoke OS null exception if reg is null by
 2867     // accessing M[reg] w/o changing any (non-CC) registers
 2868     // NOTE: cmpl is plenty here to provoke a segv
 2869     cmpptr(rax, Address(reg, 0));
 2870     // Note: should probably use testl(rax, Address(reg, 0));
 2871     //       may be shorter code (however, this version of
 2872     //       testl needs to be implemented first)
 2873   } else {
 2874     // nothing to do, (later) access of M[reg + offset]
 2875     // will provoke OS null exception if reg is null
 2876   }
 2877 }
 2878 
 2879 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
 2880   andptr(markword, markWord::inline_type_mask_in_place);
 2881   cmpptr(markword, markWord::inline_type_pattern);
 2882   jcc(Assembler::equal, is_inline_type);
 2883 }
 2884 
 2885 void MacroAssembler::test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type) {
 2886   movl(temp_reg, Address(klass, Klass::access_flags_offset()));
 2887   testl(temp_reg, JVM_ACC_VALUE);
 2888   jcc(Assembler::notZero, is_inline_type);
 2889 }
 2890 
 2891 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type) {
 2892   testptr(object, object);
 2893   jcc(Assembler::zero, not_inline_type);
 2894   const int is_inline_type_mask = markWord::inline_type_pattern;
 2895   movptr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
 2896   andptr(tmp, is_inline_type_mask);
 2897   cmpptr(tmp, is_inline_type_mask);
 2898   jcc(Assembler::notEqual, not_inline_type);
 2899 }
 2900 
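All of these inline-type tests reduce to masking the mark word and comparing against a fixed bit pattern. A sketch of the predicate, with placeholder mask and pattern values (the real constants live in markWord):

#include <cstdint>

static const uintptr_t inline_type_mask    = 0x7;  // placeholder value
static const uintptr_t inline_type_pattern = 0x4;  // placeholder value

static bool is_inline_type_sketch(uintptr_t mark_word) {
  return (mark_word & inline_type_mask) == inline_type_pattern;
}
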
 2901 void MacroAssembler::test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type) {
 2902 #ifdef ASSERT
 2903   {
 2904     Label done_check;
 2905     test_klass_is_inline_type(klass, temp_reg, done_check);
 2906     stop("test_klass_is_empty_inline_type with non inline type klass");
 2907     bind(done_check);
 2908   }
 2909 #endif
 2910   movl(temp_reg, Address(klass, InstanceKlass::misc_flags_offset()));
 2911   testl(temp_reg, InstanceKlassFlags::is_empty_inline_type_value());
 2912   jcc(Assembler::notZero, is_empty_inline_type);
 2913 }
 2914 
 2915 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) {
 2916   movl(temp_reg, flags);
 2917   shrl(temp_reg, ConstantPoolCacheEntry::is_null_free_inline_type_shift);
 2918   andl(temp_reg, 0x1);
 2919   testl(temp_reg, temp_reg);
 2920   jcc(Assembler::notZero, is_null_free_inline_type);
 2921 }
 2922 
 2923 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) {
 2924   movl(temp_reg, flags);
 2925   shrl(temp_reg, ConstantPoolCacheEntry::is_null_free_inline_type_shift);
 2926   andl(temp_reg, 0x1);
 2927   testl(temp_reg, temp_reg);
 2928   jcc(Assembler::zero, not_null_free_inline_type);
 2929 }
 2930 
 2931 void MacroAssembler::test_field_is_inlined(Register flags, Register temp_reg, Label& is_inlined) {
 2932   movl(temp_reg, flags);
 2933   shrl(temp_reg, ConstantPoolCacheEntry::is_inlined_shift);
 2934   andl(temp_reg, 0x1);
 2935   testl(temp_reg, temp_reg);
 2936   jcc(Assembler::notZero, is_inlined);
 2937 }
 2938 
 2939 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
 2940   Label test_mark_word;
 2941   // load mark word
 2942   movptr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
 2943   // if the mark word is unlocked, it is valid and can be tested directly
 2944   testl(temp_reg, markWord::unlocked_value);
 2945   jccb(Assembler::notZero, test_mark_word);
 2946   // slow path: load the prototype header from the klass
 2947   push(rscratch1);
 2948   load_prototype_header(temp_reg, oop, rscratch1);
 2949   pop(rscratch1);
 2950 
 2951   bind(test_mark_word);
 2952   testl(temp_reg, test_bit);
 2953   jcc((jmp_set) ? Assembler::notZero : Assembler::zero, jmp_label);
 2954 }
 2955 
 2956 void MacroAssembler::test_flattened_array_oop(Register oop, Register temp_reg,
 2957                                               Label& is_flattened_array) {
 2958 #ifdef _LP64
 2959   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flattened_array);
 2960 #else
 2961   load_klass(temp_reg, oop, noreg);
 2962   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 2963   test_flattened_array_layout(temp_reg, is_flattened_array);
 2964 #endif
 2965 }
 2966 
 2967 void MacroAssembler::test_non_flattened_array_oop(Register oop, Register temp_reg,
 2968                                                   Label& is_non_flattened_array) {
 2969 #ifdef _LP64
 2970   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flattened_array);
 2971 #else
 2972   load_klass(temp_reg, oop, noreg);
 2973   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 2974   test_non_flattened_array_layout(temp_reg, is_non_flattened_array);
 2975 #endif
 2976 }
 2977 
 2978 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array) {
 2979 #ifdef _LP64
 2980   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
 2981 #else
 2982   load_klass(temp_reg, oop, noreg);
 2983   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 2984   test_null_free_array_layout(temp_reg, is_null_free_array);
 2985 #endif
 2986 }
 2987 
 2988 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array) {
 2989 #ifdef _LP64
 2990   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
 2991 #else
 2992   load_klass(temp_reg, oop, noreg);
 2993   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 2994   test_non_null_free_array_layout(temp_reg, is_non_null_free_array);
 2995 #endif
 2996 }
 2997 
 2998 void MacroAssembler::test_flattened_array_layout(Register lh, Label& is_flattened_array) {
 2999   testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
 3000   jcc(Assembler::notZero, is_flattened_array);
 3001 }
 3002 
 3003 void MacroAssembler::test_non_flattened_array_layout(Register lh, Label& is_non_flattened_array) {
 3004   testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
 3005   jcc(Assembler::zero, is_non_flattened_array);
 3006 }
 3007 
 3008 void MacroAssembler::test_null_free_array_layout(Register lh, Label& is_null_free_array) {
 3009   testl(lh, Klass::_lh_null_free_array_bit_inplace);
 3010   jcc(Assembler::notZero, is_null_free_array);
 3011 }
 3012 
 3013 void MacroAssembler::test_non_null_free_array_layout(Register lh, Label& is_non_null_free_array) {
 3014   testl(lh, Klass::_lh_null_free_array_bit_inplace);
 3015   jcc(Assembler::zero, is_non_null_free_array);
 3016 }
 3017 
 3018 
 3019 void MacroAssembler::os_breakpoint() {
 3020   // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
 3021   // (e.g., MSVC can't call ps() otherwise)
 3022   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 3023 }
 3024 
 3025 void MacroAssembler::unimplemented(const char* what) {
 3026   const char* buf = nullptr;
 3027   {
 3028     ResourceMark rm;
 3029     stringStream ss;
 3030     ss.print("unimplemented: %s", what);
 3031     buf = code_string(ss.as_string());
 3032   }
 3033   stop(buf);
 3034 }
 3035 
 3036 #ifdef _LP64
 3037 #define XSTATE_BV 0x200
 3038 #endif

 4103 }
 4104 
 4105 // C++ bool manipulation
 4106 void MacroAssembler::testbool(Register dst) {
 4107   if(sizeof(bool) == 1)
 4108     testb(dst, 0xff);
 4109   else if(sizeof(bool) == 2) {
 4110     // testw implementation needed for two byte bools
 4111     ShouldNotReachHere();
 4112   } else if(sizeof(bool) == 4)
 4113     testl(dst, dst);
 4114   else
 4115     // unsupported
 4116     ShouldNotReachHere();
 4117 }
 4118 
 4119 void MacroAssembler::testptr(Register dst, Register src) {
 4120   LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
 4121 }
 4122 
 4123 // Object / value buffer allocation...
 4124 //
 4125 // Kills klass and rsi on LP64
 4126 void MacroAssembler::allocate_instance(Register klass, Register new_obj,
 4127                                        Register t1, Register t2,
 4128                                        bool clear_fields, Label& alloc_failed)
 4129 {
 4130   Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop;
 4131   Register layout_size = t1;
 4132   assert(new_obj == rax, "needs to be rax");
 4133   assert_different_registers(klass, new_obj, t1, t2);
 4134 
 4135   // get instance_size in InstanceKlass (scaled to a count of bytes)
 4136   movl(layout_size, Address(klass, Klass::layout_helper_offset()));
 4137   // test to see if it has a finalizer or is malformed in some way
 4138   testl(layout_size, Klass::_lh_instance_slow_path_bit);
 4139   jcc(Assembler::notZero, slow_case_no_pop);
 4140 
 4141   // Allocate the instance:
 4142   //  If TLAB is enabled:
 4143   //    Try to allocate in the TLAB.
 4144   //    If fails, go to the slow path.
 4145   //  Else If inline contiguous allocations are enabled:
 4146   //    Try to allocate in eden.
 4147   //    If fails due to heap end, go to slow path.
 4148   //
 4149   //  If TLAB is enabled OR inline contiguous is enabled:
 4150   //    Initialize the allocation.
 4151   //    Exit.
 4152   //
 4153   //  Go to slow path.
 4154 
 4155   push(klass);
 4156   const Register thread = LP64_ONLY(r15_thread) NOT_LP64(klass);
 4157 #ifndef _LP64
 4158   if (UseTLAB) {
 4159     get_thread(thread);
 4160   }
 4161 #endif // _LP64
 4162 
 4163   if (UseTLAB) {
 4164     tlab_allocate(thread, new_obj, layout_size, 0, klass, t2, slow_case);
 4165     if (ZeroTLAB || (!clear_fields)) {
 4166       // the fields have been already cleared
 4167       jmp(initialize_header);
 4168     } else {
 4169       // initialize both the header and fields
 4170       jmp(initialize_object);
 4171     }
 4172   } else {
 4173     jmp(slow_case);
 4174   }
 4175 
 4176   // If UseTLAB is true, the object was allocated above and still needs to be initialized.
 4177   // Otherwise, skip and go to the slow path.
 4178   if (UseTLAB) {
 4179     if (clear_fields) {
 4180       // Fields are initialized before the header.  If the remaining size
 4181       // (object size minus the header) is zero, go directly to the header initialization.
 4182       bind(initialize_object);
 4183       decrement(layout_size, sizeof(oopDesc));
 4184       jcc(Assembler::zero, initialize_header);
 4185 
 4186       // Initialize topmost object field, divide size by 8, check if odd and
 4187       // test if zero.
 4188       Register zero = klass;
 4189       xorl(zero, zero);    // use zero reg to clear memory (shorter code)
 4190       shrl(layout_size, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
 4191 
 4192   #ifdef ASSERT
 4193       // make sure instance_size was multiple of 8
 4194       Label L;
 4195       // Ignore partial flag stall after shrl() since it is debug VM
 4196       jcc(Assembler::carryClear, L);
 4197       stop("object size is not multiple of 2 - adjust this code");
 4198       bind(L);
 4199       // must be > 0, no extra check needed here
 4200   #endif
 4201 
 4202       // initialize remaining object fields: instance_size was a multiple of 8
 4203       {
 4204         Label loop;
 4205         bind(loop);
 4206         movptr(Address(new_obj, layout_size, Address::times_8, sizeof(oopDesc) - 1*oopSize), zero);
 4207         NOT_LP64(movptr(Address(new_obj, layout_size, Address::times_8, sizeof(oopDesc) - 2*oopSize), zero));
 4208         decrement(layout_size);
 4209         jcc(Assembler::notZero, loop);
 4210       }
 4211     } // clear_fields
 4212 
 4213     // initialize object header only.
 4214     bind(initialize_header);
 4215     pop(klass);
 4216     Register mark_word = t2;
 4217     movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
 4218     movptr(Address(new_obj, oopDesc::mark_offset_in_bytes()), mark_word);
 4219 #ifdef _LP64
 4220     xorl(rsi, rsi);                 // use zero reg to clear memory (shorter code)
 4221     store_klass_gap(new_obj, rsi);  // zero klass gap for compressed oops
 4222 #endif
 4223     movptr(t2, klass);         // preserve klass
 4224     store_klass(new_obj, t2, rscratch1);  // src klass reg is potentially compressed
 4225 
 4226     jmp(done);
 4227   }
 4228 
 4229   bind(slow_case);
 4230   pop(klass);
 4231   bind(slow_case_no_pop);
 4232   jmp(alloc_failed);
 4233 
 4234   bind(done);
 4235 }
 4236 
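End to end, allocate_instance is: TLAB bump allocation, optional zeroing of everything past the header, then header installation from the klass prototype. A condensed standalone sketch, assuming a one-word header for brevity (the real oopDesc header also carries the klass field):

#include <cstddef>
#include <cstdint>
#include <cstring>

static void* allocate_instance_sketch(uint8_t*& tlab_top, uint8_t* tlab_end,
                                      size_t layout_size, uintptr_t prototype_mark,
                                      bool clear_fields) {
  if (tlab_top + layout_size > tlab_end) {
    return nullptr;                    // alloc_failed: take the slow path
  }
  uint8_t* obj = tlab_top;
  tlab_top += layout_size;             // bump-pointer allocation
  if (clear_fields && layout_size > sizeof(uintptr_t)) {
    std::memset(obj + sizeof(uintptr_t), 0, layout_size - sizeof(uintptr_t));
  }
  *reinterpret_cast<uintptr_t*>(obj) = prototype_mark;  // initialize the header last
  return obj;
}
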
 4237 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
 4238 void MacroAssembler::tlab_allocate(Register thread, Register obj,
 4239                                    Register var_size_in_bytes,
 4240                                    int con_size_in_bytes,
 4241                                    Register t1,
 4242                                    Register t2,
 4243                                    Label& slow_case) {
 4244   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 4245   bs->tlab_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
 4246 }
 4247 
 4248 RegSet MacroAssembler::call_clobbered_gp_registers() {
 4249   RegSet regs;
 4250 #ifdef _LP64
 4251   regs += RegSet::of(rax, rcx, rdx);
 4252 #ifndef WINDOWS
 4253   regs += RegSet::of(rsi, rdi);
 4254 #endif
 4255   regs += RegSet::range(r8, r11);
 4256 #else

 4469     // clear topmost word (no jump would be needed if conditional assignment worked here)
 4470     movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp);
 4471     // index could be 0 now, must check again
 4472     jcc(Assembler::zero, done);
 4473     bind(even);
 4474   }
 4475 #endif // !_LP64
 4476   // initialize remaining object fields: index is a multiple of 2 now
 4477   {
 4478     Label loop;
 4479     bind(loop);
 4480     movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
 4481     NOT_LP64(movptr(Address(address, index, Address::times_8, offset_in_bytes - 2*BytesPerWord), temp);)
 4482     decrement(index);
 4483     jcc(Assembler::notZero, loop);
 4484   }
 4485 
 4486   bind(done);
 4487 }
 4488 
 4489 void MacroAssembler::get_inline_type_field_klass(Register klass, Register index, Register inline_klass) {
 4490   movptr(inline_klass, Address(klass, InstanceKlass::inline_type_field_klasses_offset()));
 4491 #ifdef ASSERT
 4492   {
 4493     Label done;
 4494     cmpptr(inline_klass, 0);
 4495     jcc(Assembler::notEqual, done);
 4496     stop("get_inline_type_field_klass contains no inline klass");
 4497     bind(done);
 4498   }
 4499 #endif
 4500   movptr(inline_klass, Address(inline_klass, index, Address::times_ptr));
 4501 }
 4502 
 4503 void MacroAssembler::get_default_value_oop(Register inline_klass, Register temp_reg, Register obj) {
 4504 #ifdef ASSERT
 4505   {
 4506     Label done_check;
 4507     test_klass_is_inline_type(inline_klass, temp_reg, done_check);
 4508     stop("get_default_value_oop from non inline type klass");
 4509     bind(done_check);
 4510   }
 4511 #endif
 4512   Register offset = temp_reg;
 4513   // Getting the offset of the pre-allocated default value
 4514   movptr(offset, Address(inline_klass, in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset())));
 4515   movl(offset, Address(offset, in_bytes(InlineKlass::default_value_offset_offset())));
 4516 
 4517   // Getting the mirror
 4518   movptr(obj, Address(inline_klass, in_bytes(Klass::java_mirror_offset())));
 4519   resolve_oop_handle(obj, inline_klass);
 4520 
 4521   // Getting the pre-allocated default value from the mirror
 4522   Address field(obj, offset, Address::times_1);
 4523   load_heap_oop(obj, field);
 4524 }
 4525 
 4526 void MacroAssembler::get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj) {
 4527 #ifdef ASSERT
 4528   {
 4529     Label done_check;
 4530     test_klass_is_empty_inline_type(inline_klass, temp_reg, done_check);
 4531     stop("get_empty_value from non-empty inline klass");
 4532     bind(done_check);
 4533   }
 4534 #endif
 4535   get_default_value_oop(inline_klass, temp_reg, obj);
 4536 }
 4537 
 4538 
 4539 // Look up the method for a megamorphic invokeinterface call.
 4540 // The target method is determined by <intf_klass, itable_index>.
 4541 // The receiver klass is in recv_klass.
 4542 // On success, the result will be in method_result, and execution falls through.
 4543 // On failure, execution transfers to the given label.
 4544 void MacroAssembler::lookup_interface_method(Register recv_klass,
 4545                                              Register intf_klass,
 4546                                              RegisterOrConstant itable_index,
 4547                                              Register method_result,
 4548                                              Register scan_temp,
 4549                                              Label& L_no_such_interface,
 4550                                              bool return_method) {
 4551   assert_different_registers(recv_klass, intf_klass, scan_temp);
 4552   assert_different_registers(method_result, intf_klass, scan_temp);
 4553   assert(recv_klass != method_result || !return_method,
 4554          "recv_klass can be destroyed when method isn't needed");
 4555 
 4556   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
 4557          "caller must use same register for non-constant itable index as for method");
 4558 

 4867   } else {
 4868     Label L;
 4869     jccb(negate_condition(cc), L);
 4870     movl(dst, src);
 4871     bind(L);
 4872   }
 4873 }
 4874 
 4875 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
 4876   if (VM_Version::supports_cmov()) {
 4877     cmovl(cc, dst, src);
 4878   } else {
 4879     Label L;
 4880     jccb(negate_condition(cc), L);
 4881     movl(dst, src);
 4882     bind(L);
 4883   }
 4884 }
 4885 
 4886 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
 4887   if (!VerifyOops || VerifyAdapterSharing) {
 4888     // The address of the code string below confuses VerifyAdapterSharing
 4889     // because it may differ between otherwise equivalent adapters.
 4890     return;
 4891   }
 4892 
 4893   BLOCK_COMMENT("verify_oop {");
 4894 #ifdef _LP64
 4895   push(rscratch1);
 4896 #endif
 4897   push(rax);                          // save rax
 4898   push(reg);                          // pass register argument
 4899 
 4900   // Pass register number to verify_oop_subroutine
 4901   const char* b = nullptr;
 4902   {
 4903     ResourceMark rm;
 4904     stringStream ss;
 4905     ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
 4906     b = code_string(ss.as_string());
 4907   }
 4908   ExternalAddress buffer((address) b);
 4909   pushptr(buffer.addr(), rscratch1);
 4910 
 4911   // call indirectly to solve generation ordering problem

 4933   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
 4934   int stackElementSize = Interpreter::stackElementSize;
 4935   int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
 4936 #ifdef ASSERT
 4937   int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
 4938   assert(offset1 - offset == stackElementSize, "correct arithmetic");
 4939 #endif
 4940   Register             scale_reg    = noreg;
 4941   Address::ScaleFactor scale_factor = Address::no_scale;
 4942   if (arg_slot.is_constant()) {
 4943     offset += arg_slot.as_constant() * stackElementSize;
 4944   } else {
 4945     scale_reg    = arg_slot.as_register();
 4946     scale_factor = Address::times(stackElementSize);
 4947   }
 4948   offset += wordSize;           // return PC is on stack
 4949   return Address(rsp, scale_reg, scale_factor, offset);
 4950 }
 4951 
 4952 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
 4953   if (!VerifyOops || VerifyAdapterSharing) {
 4954     // The address of the code string below confuses VerifyAdapterSharing
 4955     // because it may differ between otherwise equivalent adapters.
 4956     return;
 4957   }
 4958 
 4959 #ifdef _LP64
 4960   push(rscratch1);
 4961 #endif
 4962   push(rax); // save rax
 4963   // addr may contain rsp so we will have to adjust it based on the push
 4964   // we just did (and on 64 bit we do two pushes)
 4965   // NOTE: the 64-bit code used to have a bug here: it did movq(addr, rax), which
 4966   //       stores rax into addr, the reverse of what was intended.
 4967   if (addr.uses(rsp)) {
 4968     lea(rax, addr);
 4969     pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
 4970   } else {
 4971     pushptr(addr);
 4972   }
 4973 
 4974   // Pass register number to verify_oop_subroutine
 4975   const char* b = nullptr;
 4976   {
 4977     ResourceMark rm;

 5424 
 5425 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
 5426   // get mirror
 5427   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 5428   load_method_holder(mirror, method);
 5429   movptr(mirror, Address(mirror, mirror_offset));
 5430   resolve_oop_handle(mirror, tmp);
 5431 }
 5432 
 5433 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
 5434   load_method_holder(rresult, rmethod);
 5435   movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
 5436 }
 5437 
 5438 void MacroAssembler::load_method_holder(Register holder, Register method) {
 5439   movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
 5440   movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
 5441   movptr(holder, Address(holder, ConstantPool::pool_holder_offset()));          // InstanceKlass*
 5442 }
 5443 
 5444 void MacroAssembler::load_metadata(Register dst, Register src) {
 5445   if (UseCompressedClassPointers) {
 5446     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5447   } else {
 5448     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5449   }
 5450 }
 5451 
 5452 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
 5453   assert_different_registers(src, tmp);
 5454   assert_different_registers(dst, tmp);
 5455 #ifdef _LP64
 5456   if (UseCompressedClassPointers) {
 5457     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5458     decode_klass_not_null(dst, tmp);
 5459   } else
 5460 #endif
 5461   movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5462 }
 5463 
 5464 void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) {
 5465   load_klass(dst, src, tmp);
 5466   movptr(dst, Address(dst, Klass::prototype_header_offset()));
 5467 }
 5468 
 5469 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
 5470   assert_different_registers(src, tmp);
 5471   assert_different_registers(dst, tmp);
 5472 #ifdef _LP64
 5473   if (UseCompressedClassPointers) {
 5474     encode_klass_not_null(src, tmp);
 5475     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5476   } else
 5477 #endif
 5478     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5479 }
 5480 
 5481 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
 5482                                     Register tmp1, Register thread_tmp) {
 5483   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5484   decorators = AccessInternal::decorator_fixup(decorators, type);
 5485   bool as_raw = (decorators & AS_RAW) != 0;
 5486   if (as_raw) {
 5487     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
 5488   } else {
 5489     bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
 5490   }
 5491 }
 5492 
 5493 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
 5494                                      Register tmp1, Register tmp2, Register tmp3) {
 5495   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5496   decorators = AccessInternal::decorator_fixup(decorators, type);
 5497   bool as_raw = (decorators & AS_RAW) != 0;
 5498   if (as_raw) {
 5499     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5500   } else {
 5501     bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5502   }
 5503 }
 5504 
 5505 void MacroAssembler::access_value_copy(DecoratorSet decorators, Register src, Register dst,
 5506                                        Register inline_klass) {
 5507   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5508   bs->value_copy(this, decorators, src, dst, inline_klass);
 5509 }
 5510 
 5511 void MacroAssembler::first_field_offset(Register inline_klass, Register offset) {
 5512   movptr(offset, Address(inline_klass, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 5513   movl(offset, Address(offset, InlineKlass::first_field_offset_offset()));
 5514 }
 5515 
 5516 void MacroAssembler::data_for_oop(Register oop, Register data, Register inline_klass) {
 5517   // ((address) (void*) o) + vk->first_field_offset();
 5518   Register offset = (data == oop) ? rscratch1 : data;
 5519   first_field_offset(inline_klass, offset);
 5520   if (data == oop) {
 5521     addptr(data, offset);
 5522   } else {
 5523     lea(data, Address(oop, offset));
 5524   }
 5525 }
 5526 
 5527 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass,
 5528                                                 Register index, Register data) {
 5529   assert(index != rcx, "index needs to shift by rcx");
 5530   assert_different_registers(array, array_klass, index);
 5531   assert_different_registers(rcx, array, index);
 5532 
 5533   // array->base() + (index << Klass::layout_helper_log2_element_size(lh));
 5534   movl(rcx, Address(array_klass, Klass::layout_helper_offset()));
 5535 
 5536   // Klass::layout_helper_log2_element_size(lh)
 5537   // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
 5538   shrl(rcx, Klass::_lh_log2_element_size_shift);
 5539   andl(rcx, Klass::_lh_log2_element_size_mask);
 5540   shlptr(index); // index << rcx
 5541 
 5542   lea(data, Address(array, index, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT)));
 5543 }
 5544 
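The address computed here is the array base plus the index shifted by the log2 element size, which is itself extracted from the layout helper. A sketch of the arithmetic, with the layout-helper shift and mask passed as parameters instead of read from Klass:

#include <cstdint>

static uintptr_t flat_element_addr_sketch(uintptr_t array_base, uintptr_t index,
                                          int layout_helper,
                                          int log2_shift, int log2_mask) {
  int log2_elem_size = (layout_helper >> log2_shift) & log2_mask;
  return array_base + (index << log2_elem_size);
}
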
 5545 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
 5546                                    Register thread_tmp, DecoratorSet decorators) {
 5547   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
 5548 }
 5549 
 5550 // Doesn't do verification, generates fixed size code
 5551 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
 5552                                             Register thread_tmp, DecoratorSet decorators) {
 5553   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
 5554 }
 5555 
 5556 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
 5557                                     Register tmp2, Register tmp3, DecoratorSet decorators) {
 5558   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
 5559 }
 5560 
 5561 // Used for storing nulls.
 5562 void MacroAssembler::store_heap_oop_null(Address dst) {
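        // Passing noreg as the value makes the barrier code store the null
        // oop representation itself; no value register is materialized here.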
 5563   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
 5564 }

 5864 
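      // Reload r12_heapbase with the compressed-oops base. r12 caches this
      // base and must be re-materialized after any call that may clobber it.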
 5865 void MacroAssembler::reinit_heapbase() {
 5866   if (UseCompressedOops) {
 5867     if (Universe::heap() != nullptr) {
 5868       if (CompressedOops::base() == nullptr) {
 5869         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
 5870       } else {
 5871         mov64(r12_heapbase, (int64_t)CompressedOops::ptrs_base());
 5872       }
 5873     } else {
 5874       movptr(r12_heapbase, ExternalAddress(CompressedOops::ptrs_base_addr()));
 5875     }
 5876   }
 5877 }
 5878 
 5879 #endif // _LP64
 5880 
 5881 #if COMPILER2_OR_JVMCI
 5882 
 5883 // Clear memory of 'cnt' qwords, starting at 'base', using XMM/YMM/ZMM registers
 5884 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, KRegister mask) {
 5885   // cnt - number of qwords (8-byte words).
 5886   // base - start address, qword aligned.
 5887   Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
 5888   bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
 5889   if (use64byteVector) {
 5890     evpbroadcastq(xtmp, val, AVX_512bit);
 5891   } else if (MaxVectorSize >= 32) {
 5892     movdq(xtmp, val);
 5893     punpcklqdq(xtmp, xtmp);
 5894     vinserti128_high(xtmp, xtmp);
 5895   } else {
 5896     movdq(xtmp, val);
 5897     punpcklqdq(xtmp, xtmp);
 5898   }
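        // xtmp now holds 'val' replicated into every 8-byte lane of the widest
        // vector the loops below will store.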
 5899   jmp(L_zero_64_bytes);
 5900 
 5901   BIND(L_loop);
 5902   if (MaxVectorSize >= 32) {
 5903     fill64(base, 0, xtmp, use64byteVector);
 5904   } else {
 5905     movdqu(Address(base,  0), xtmp);
 5906     movdqu(Address(base, 16), xtmp);
 5907     movdqu(Address(base, 32), xtmp);
 5908     movdqu(Address(base, 48), xtmp);
 5909   }
 5910   addptr(base, 64);
 5911 
 5912   BIND(L_zero_64_bytes);
 5913   subptr(cnt, 8);
 5914   jccb(Assembler::greaterEqual, L_loop);
 5915 
 5916   // Fill trailing bytes (up to 64)
 5917   if (use64byteVector) {
 5918     addptr(cnt, 8);
 5919     jccb(Assembler::equal, L_end);
 5920     fill64_masked(3, base, 0, xtmp, mask, cnt, val, true);
 5921     jmp(L_end);
 5922   } else {
 5923     addptr(cnt, 4);
 5924     jccb(Assembler::less, L_tail);
 5925     if (MaxVectorSize >= 32) {
 5926       vmovdqu(Address(base, 0), xtmp);
 5927     } else {
 5928       movdqu(Address(base,  0), xtmp);
 5929       movdqu(Address(base, 16), xtmp);
 5930     }
 5931   }
 5932   addptr(base, 32);
 5933   subptr(cnt, 4);
 5934 
 5935   BIND(L_tail);
 5936   addptr(cnt, 4);
 5937   jccb(Assembler::lessEqual, L_end);
 5938   if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
 5939     fill32_masked(3, base, 0, xtmp, mask, cnt, val);
 5940   } else {
 5941     decrement(cnt);
 5942 
 5943     BIND(L_sloop);
 5944     movq(Address(base, 0), xtmp);
 5945     addptr(base, 8);
 5946     decrement(cnt);
 5947     jccb(Assembler::greaterEqual, L_sloop);
 5948   }
 5949   BIND(L_end);
 5950 }
 5951 
 5952 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
 5953   assert(InlineTypeReturnedAsFields, "Inline types should only be returned as fields when enabled");
 5954   // An inline type might be returned. If fields are in registers we
 5955   // need to allocate an inline type instance and initialize it with
 5956   // the values of the fields.
 5957   Label skip;
 5958   // We only need a new buffered inline type if one was not already returned (a set low bit in rax means the fields are in registers)
 5959   testptr(rax, 1);
 5960   jcc(Assembler::zero, skip);
 5961   int call_offset = -1;
 5962 
 5963 #ifdef _LP64
 5964   // The following code is similar to allocate_instance but has some slight differences,
 5965   // e.g. the object size is never zero and is sometimes constant, and storing the klass
 5966   // pointer after allocation is unnecessary if vk != nullptr. allocate_instance is not aware of these.
 5967   Label slow_case;
 5968   // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
 5969   mov(rscratch1, rax); // save rax for slow_case since *_allocate may corrupt it when allocation failed
 5970   if (vk != nullptr) {
 5971     // Called from C1, where the return type is statically known.
 5972     movptr(rbx, (intptr_t)vk->get_InlineKlass());
 5973     jint obj_size = vk->layout_helper();
 5974     assert(obj_size != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
 5975     if (UseTLAB) {
 5976       tlab_allocate(r15_thread, rax, noreg, obj_size, r13, r14, slow_case);
 5977     } else {
 5978       jmp(slow_case);
 5979     }
 5980   } else {
 5981     // Called from the interpreter. RAX contains ((the InlineKlass* of the return type) | 0x01)
 5982     mov(rbx, rax);
 5983     andptr(rbx, -2);
 5984     movl(r14, Address(rbx, Klass::layout_helper_offset()));
 5985     if (UseTLAB) {
 5986       tlab_allocate(r15_thread, rax, r14, 0, r13, r14, slow_case);
 5987     } else {
 5988       jmp(slow_case);
 5989     }
 5990   }
 5991   if (UseTLAB) {
 5992     // 2. Initialize buffered inline instance header
 5993     Register buffer_obj = rax;
 5994     movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), (intptr_t)markWord::inline_type_prototype().value());
 5995     xorl(r13, r13);
 5996     store_klass_gap(buffer_obj, r13);
 5997     if (vk == nullptr) {
 5998       // store_klass corrupts rbx(klass), so save it in r13 for later use (interpreter case only).
 5999       mov(r13, rbx);
 6000     }
 6001     store_klass(buffer_obj, rbx, rscratch1);
 6002     // 3. Initialize its fields with an inline class specific handler
 6003     if (vk != nullptr) {
 6004       call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
 6005     } else {
 6006       movptr(rbx, Address(r13, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 6007       movptr(rbx, Address(rbx, InlineKlass::pack_handler_offset()));
 6008       call(rbx);
 6009     }
 6010     jmp(skip);
 6011   }
 6012   bind(slow_case);
 6013   // We failed to allocate a new inline type, so fall back to a runtime
 6014   // call. Some oop fields may be live in registers but we can't
 6015   // tell. The runtime call will take care of preserving them
 6016   // across a GC if there is one.
 6017   mov(rax, rscratch1);
 6018 #endif
 6019 
 6020   if (from_interpreter) {
 6021     super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
 6022   } else {
 6023     call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
 6024     call_offset = offset();
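          // The recorded offset lets the compiled caller attach debug info /
          // an oop map to this call site; the interpreter path returns -1.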
 6025   }
 6026 
 6027   bind(skip);
 6028   return call_offset;
 6029 }
 6030 
 6031 // Move a value between registers/stack slots and update the reg_state
 6032 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
 6033   assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
 6034   if (reg_state[to->value()] == reg_written) {
 6035     return true; // Already written
 6036   }
 6037   if (from != to && bt != T_VOID) {
 6038     if (reg_state[to->value()] == reg_readonly) {
 6039       return false; // Not yet writable
 6040     }
 6041     if (from->is_reg()) {
 6042       if (to->is_reg()) {
 6043         if (from->is_XMMRegister()) {
 6044           if (bt == T_DOUBLE) {
 6045             movdbl(to->as_XMMRegister(), from->as_XMMRegister());
 6046           } else {
 6047             assert(bt == T_FLOAT, "must be float");
 6048             movflt(to->as_XMMRegister(), from->as_XMMRegister());
 6049           }
 6050         } else {
 6051           movq(to->as_Register(), from->as_Register());
 6052         }
 6053       } else {
 6054         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6055         Address to_addr = Address(rsp, st_off);
 6056         if (from->is_XMMRegister()) {
 6057           if (bt == T_DOUBLE) {
 6058             movdbl(to_addr, from->as_XMMRegister());
 6059           } else {
 6060             assert(bt == T_FLOAT, "must be float");
 6061             movflt(to_addr, from->as_XMMRegister());
 6062           }
 6063         } else {
 6064           movq(to_addr, from->as_Register());
 6065         }
 6066       }
 6067     } else {
 6068       Address from_addr = Address(rsp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize);
 6069       if (to->is_reg()) {
 6070         if (to->is_XMMRegister()) {
 6071           if (bt == T_DOUBLE) {
 6072             movdbl(to->as_XMMRegister(), from_addr);
 6073           } else {
 6074             assert(bt == T_FLOAT, "must be float");
 6075             movflt(to->as_XMMRegister(), from_addr);
 6076           }
 6077         } else {
 6078           movq(to->as_Register(), from_addr);
 6079         }
 6080       } else {
 6081         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6082         movq(r13, from_addr);
 6083         movq(Address(rsp, st_off), r13);
 6084       }
 6085     }
 6086   }
 6087   // Update register states
 6088   reg_state[from->value()] = reg_writable;
 6089   reg_state[to->value()] = reg_written;
 6090   return true;
 6091 }
 6092 
 6093 // Calculate the extra stack space required for packing or unpacking inline
 6094 // args and adjust the stack pointer
 6095 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
 6096   // Two additional slots to account for the return address
 6097   int sp_inc = (args_on_stack + 2) * VMRegImpl::stack_slot_size;
 6098   sp_inc = align_up(sp_inc, StackAlignmentInBytes);
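        // e.g. args_on_stack == 5: (5 + 2) * 4 = 28 bytes, rounded up to 32
        // (assuming 4-byte stack slots and 16-byte stack alignment).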
 6099   // Save the return address, adjust the stack (make sure it is properly
 6100   // 16-byte aligned) and copy the return address to the new top of the stack.
 6101   // The stack will be repaired on return (see MacroAssembler::remove_frame).
 6102   assert(sp_inc > 0, "sanity");
 6103   pop(r13);
 6104   subptr(rsp, sp_inc);
 6105   push(r13);
 6106   return sp_inc;
 6107 }
 6108 
 6109 // Read all fields from an inline type buffer and store the field values in registers/stack slots.
 6110 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
 6111                                           VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
 6112                                           RegState reg_state[]) {
 6113   assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
 6114   assert(from->is_valid(), "source must be valid");
 6115   bool progress = false;
 6116 #ifdef ASSERT
 6117   const int start_offset = offset();
 6118 #endif
 6119 
 6120   Label L_null, L_notNull;
 6121   // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
 6122   Register tmp1 = r10;
 6123   Register tmp2 = r13;
 6124   Register fromReg = noreg;
 6125   ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, -1);
 6126   bool done = true;
 6127   bool mark_done = true;
 6128   VMReg toReg;
 6129   BasicType bt;
 6130   // Check if argument requires a null check
 6131   bool null_check = false;
 6132   VMReg nullCheckReg;
 6133   while (stream.next(nullCheckReg, bt)) {
 6134     if (sig->at(stream.sig_index())._offset == -1) {
 6135       null_check = true;
 6136       break;
 6137     }
 6138   }
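        // An _offset of -1 in the signature marks the synthetic IsInit slot
        // of a nullable inline type argument (handled below and in L_null).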
 6139   stream.reset(sig_index, to_index);
 6140   while (stream.next(toReg, bt)) {
 6141     assert(toReg->is_valid(), "destination must be valid");
 6142     int idx = (int)toReg->value();
 6143     if (reg_state[idx] == reg_readonly) {
 6144       if (idx != from->value()) {
 6145         mark_done = false;
 6146       }
 6147       done = false;
 6148       continue;
 6149     } else if (reg_state[idx] == reg_written) {
 6150       continue;
 6151     }
 6152     assert(reg_state[idx] == reg_writable, "must be writable");
 6153     reg_state[idx] = reg_written;
 6154     progress = true;
 6155 
 6156     if (fromReg == noreg) {
 6157       if (from->is_reg()) {
 6158         fromReg = from->as_Register();
 6159       } else {
 6160         int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6161         movq(tmp1, Address(rsp, st_off));
 6162         fromReg = tmp1;
 6163       }
 6164       if (null_check) {
 6165         // Nullable inline type argument, emit null check
 6166         testptr(fromReg, fromReg);
 6167         jcc(Assembler::zero, L_null);
 6168       }
 6169     }
 6170     int off = sig->at(stream.sig_index())._offset;
 6171     if (off == -1) {
 6172       assert(null_check, "Missing null check");
 6173       if (toReg->is_stack()) {
 6174         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6175         movq(Address(rsp, st_off), 1);
 6176       } else {
 6177         movq(toReg->as_Register(), 1);
 6178       }
 6179       continue;
 6180     }
 6181     assert(off > 0, "offset in object should be positive");
 6182     Address fromAddr = Address(fromReg, off);
 6183     if (!toReg->is_XMMRegister()) {
 6184       Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
 6185       if (is_reference_type(bt)) {
 6186         load_heap_oop(dst, fromAddr);
 6187       } else {
 6188         bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
 6189         load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
 6190       }
 6191       if (toReg->is_stack()) {
 6192         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6193         movq(Address(rsp, st_off), dst);
 6194       }
 6195     } else if (bt == T_DOUBLE) {
 6196       movdbl(toReg->as_XMMRegister(), fromAddr);
 6197     } else {
 6198       assert(bt == T_FLOAT, "must be float");
 6199       movflt(toReg->as_XMMRegister(), fromAddr);
 6200     }
 6201   }
 6202   if (progress && null_check) {
 6203     if (done) {
 6204       jmp(L_notNull);
 6205       bind(L_null);
 6206       // Set IsInit field to zero to signal that the argument is null.
 6207       // Also set all oop fields to zero to make the GC happy.
 6208       stream.reset(sig_index, to_index);
 6209       while (stream.next(toReg, bt)) {
 6210         if (sig->at(stream.sig_index())._offset == -1 ||
 6211             bt == T_OBJECT || bt == T_ARRAY) {
 6212           if (toReg->is_stack()) {
 6213             int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6214             movq(Address(rsp, st_off), 0);
 6215           } else {
 6216             xorq(toReg->as_Register(), toReg->as_Register());
 6217           }
 6218         }
 6219       }
 6220       bind(L_notNull);
 6221     } else {
 6222       bind(L_null);
 6223     }
 6224   }
 6225 
 6226   sig_index = stream.sig_index();
 6227   to_index = stream.regs_index();
 6228 
 6229   if (mark_done && reg_state[from->value()] != reg_written) {
 6230     // This is okay because no one else will write to that slot
 6231     reg_state[from->value()] = reg_writable;
 6232   }
 6233   from_index--;
 6234   assert(progress || (start_offset == offset()), "should not emit code");
 6235   return done;
 6236 }
 6237 
 6238 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
 6239                                         VMRegPair* from, int from_count, int& from_index, VMReg to,
 6240                                         RegState reg_state[], Register val_array) {
 6241   assert(sig->at(sig_index)._bt == T_PRIMITIVE_OBJECT, "should be at end delimiter");
 6242   assert(to->is_valid(), "destination must be valid");
 6243 
 6244   if (reg_state[to->value()] == reg_written) {
 6245     skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
 6246     return true; // Already written
 6247   }
 6248 
 6249   // TODO 8284443 Isn't it an issue if the code below uses r14 as a tmp while it holds a spilled value?
 6250   // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
 6251   Register val_obj_tmp = r11;
 6252   Register from_reg_tmp = r14;
 6253   Register tmp1 = r10;
 6254   Register tmp2 = r13;
 6255   Register tmp3 = rbx;
 6256   Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();
 6257 
 6258   assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);
 6259 
 6260   if (reg_state[to->value()] == reg_readonly) {
 6261     if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
 6262       skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
 6263       return false; // Not yet writable
 6264     }
 6265     val_obj = val_obj_tmp;
 6266   }
 6267 
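        // Byte offset of element 'vtarg_index' in the oop array that holds
        // the buffered inline type values.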
 6268   int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_PRIMITIVE_OBJECT);
 6269   load_heap_oop(val_obj, Address(val_array, index));
 6270 
 6271   ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
 6272   VMReg fromReg;
 6273   BasicType bt;
 6274   Label L_null;
 6275   while (stream.next(fromReg, bt)) {
 6276     assert(fromReg->is_valid(), "source must be valid");
 6277     reg_state[fromReg->value()] = reg_writable;
 6278 
 6279     int off = sig->at(stream.sig_index())._offset;
 6280     if (off == -1) {
 6281       // Nullable inline type argument, emit null check
 6282       Label L_notNull;
 6283       if (fromReg->is_stack()) {
 6284         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6285         testb(Address(rsp, ld_off), 1);
 6286       } else {
 6287         testb(fromReg->as_Register(), 1);
 6288       }
 6289       jcc(Assembler::notZero, L_notNull);
 6290       movptr(val_obj, 0);
 6291       jmp(L_null);
 6292       bind(L_notNull);
 6293       continue;
 6294     }
 6295 
 6296     assert(off > 0, "offset in object should be positive");
 6297     size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
 6298 
 6299     Address dst(val_obj, off);
 6300     if (!fromReg->is_XMMRegister()) {
 6301       Register src;
 6302       if (fromReg->is_stack()) {
 6303         src = from_reg_tmp;
 6304         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6305         load_sized_value(src, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
 6306       } else {
 6307         src = fromReg->as_Register();
 6308       }
 6309       assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
 6310       if (is_reference_type(bt)) {
 6311         store_heap_oop(dst, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
 6312       } else {
 6313         store_sized_value(dst, src, size_in_bytes);
 6314       }
 6315     } else if (bt == T_DOUBLE) {
 6316       movdbl(dst, fromReg->as_XMMRegister());
 6317     } else {
 6318       assert(bt == T_FLOAT, "must be float");
 6319       movflt(dst, fromReg->as_XMMRegister());
 6320     }
 6321   }
 6322   bind(L_null);
 6323   sig_index = stream.sig_index();
 6324   from_index = stream.regs_index();
 6325 
 6326   assert(reg_state[to->value()] == reg_writable, "must have already been read");
 6327   bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
 6328   assert(success, "to register must be writable");
 6329   return true;
 6330 }
 6331 
 6332 VMReg MacroAssembler::spill_reg_for(VMReg reg) {
 6333   return reg->is_XMMRegister() ? xmm8->as_VMReg() : r14->as_VMReg();
 6334 }
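      // xmm8 and r14 are reserved for spilling; hence the r14 caveats in
      // unpack_inline_helper and pack_inline_helper above.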
 6335 
 6336 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
 6337   assert((initial_framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
 6338   if (needs_stack_repair) {
 6339     movq(rbp, Address(rsp, initial_framesize));
 6340     // The stack increment resides just below the saved rbp
 6341     addq(rsp, Address(rsp, initial_framesize - wordSize));
 6342   } else {
 6343     if (initial_framesize > 0) {
 6344       addq(rsp, initial_framesize);
 6345     }
 6346     pop(rbp);
 6347   }
 6348 }
 6349 
 6350 // Clearing constant-sized memory using YMM/ZMM registers.
 6351 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 6352   assert(UseAVX > 2 && VM_Version::supports_avx512vlbw(), "");
 6353   bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
 6354 
 6355   int vector64_count = (cnt & (~0x7)) >> 3;
 6356   cnt = cnt & 0x7;
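        // e.g. cnt == 23 qwords: vector64_count == 2 (two 64-byte chunks),
        // leaving 7 trailing qwords for the switch below.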
 6357   const int fill64_per_loop = 4;
 6358   const int max_unrolled_fill64 = 8;
 6359 
 6360   // 64-byte initialization loop.
 6361   vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit);
 6362   int start64 = 0;
 6363   if (vector64_count > max_unrolled_fill64) {
 6364     Label LOOP;
 6365     Register index = rtmp;
 6366 
 6367     start64 = vector64_count - (vector64_count % fill64_per_loop);
 6368 
 6369     movl(index, 0);

 6419         break;
 6420       case 7:
 6421         if (use64byteVector) {
 6422           movl(rtmp, 0x7F);
 6423           kmovwl(mask, rtmp);
 6424           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 6425         } else {
 6426           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 6427           movl(rtmp, 0x7);
 6428           kmovwl(mask, rtmp);
 6429           evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
 6430         }
 6431         break;
 6432       default:
 6433         fatal("Unexpected length: %d\n", cnt);
 6434         break;
 6435     }
 6436   }
 6437 }
 6438 
 6439 void MacroAssembler::clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp,
 6440                                bool is_large, bool word_copy_only, KRegister mask) {
 6441   // cnt      - number of qwords (8-byte words).
 6442   // base     - start address, qword aligned.
 6443   // is_large - set if the optimizer knows cnt is larger than InitArrayShortSize
 6444   assert(base == rdi, "base register must be rdi for rep stos");
 6445   assert(val  == rax, "val register must be rax for rep stos");
 6446   assert(cnt  == rcx, "cnt register must be rcx for rep stos");
 6447   assert(InitArrayShortSize % BytesPerLong == 0,
 6448     "InitArrayShortSize should be a multiple of BytesPerLong");
 6449 
 6450   Label DONE;



 6451 
 6452   if (!is_large) {
 6453     Label LOOP, LONG;
 6454     cmpptr(cnt, InitArrayShortSize/BytesPerLong);
 6455     jccb(Assembler::greater, LONG);
 6456 
 6457     NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
 6458 
 6459     decrement(cnt);
 6460     jccb(Assembler::negative, DONE); // Zero length
 6461 
 6462     // Use individual pointer-sized stores for small counts:
 6463     BIND(LOOP);
 6464     movptr(Address(base, cnt, Address::times_ptr), val);
 6465     decrement(cnt);
 6466     jccb(Assembler::greaterEqual, LOOP);
 6467     jmpb(DONE);
 6468 
 6469     BIND(LONG);
 6470   }
 6471 
 6472   // Use longer rep-prefixed ops for non-small counts:
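        // UseFastStosb is only set on CPUs with ERMS (enhanced rep stosb),
        // where byte-granular rep stosb matches or beats rep stosq here.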
 6473   if (UseFastStosb && !word_copy_only) {
 6474     shlptr(cnt, 3); // convert to number of bytes
 6475     rep_stosb();
 6476   } else if (UseXMMForObjInit) {
 6477     xmm_clear_mem(base, cnt, val, xtmp, mask);
 6478   } else {
 6479     NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
 6480     rep_stos();
 6481   }
 6482 
 6483   BIND(DONE);
 6484 }
 6485 
 6486 #endif //COMPILER2_OR_JVMCI
 6487 
 6488 
 6489 void MacroAssembler::generate_fill(BasicType t, bool aligned,
 6490                                    Register to, Register value, Register count,
 6491                                    Register rtmp, XMMRegister xtmp) {
 6492   ShortBranchVerifier sbv(this);
 6493   assert_different_registers(to, value, count, rtmp);
 6494   Label L_exit;
 6495   Label L_fill_2_bytes, L_fill_4_bytes;
 6496 
 6497 #if defined(COMPILER2) && defined(_LP64)
< prev index next >