
src/hotspot/cpu/x86/macroAssembler_x86.cpp


   10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   12  * version 2 for more details (a copy is included in the LICENSE file that
   13  * accompanied this code).
   14  *
   15  * You should have received a copy of the GNU General Public License version
   16  * 2 along with this work; if not, write to the Free Software Foundation,
   17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   18  *
   19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   20  * or visit www.oracle.com if you need additional information or have any
   21  * questions.
   22  *
   23  */
   24 
   25 #include "precompiled.hpp"
   26 #include "asm/assembler.hpp"
   27 #include "asm/assembler.inline.hpp"
   28 #include "compiler/compiler_globals.hpp"
   29 #include "compiler/disassembler.hpp"

   30 #include "crc32c.h"
   31 #include "gc/shared/barrierSet.hpp"
   32 #include "gc/shared/barrierSetAssembler.hpp"
   33 #include "gc/shared/collectedHeap.inline.hpp"
   34 #include "gc/shared/tlab_globals.hpp"
   35 #include "interpreter/bytecodeHistogram.hpp"
   36 #include "interpreter/interpreter.hpp"
   37 #include "jvm.h"
   38 #include "memory/resourceArea.hpp"
   39 #include "memory/universe.hpp"
   40 #include "oops/accessDecorators.hpp"
   41 #include "oops/compressedKlass.inline.hpp"
   42 #include "oops/compressedOops.inline.hpp"
   43 #include "oops/klass.inline.hpp"

   44 #include "prims/methodHandles.hpp"
   45 #include "runtime/continuation.hpp"
   46 #include "runtime/interfaceSupport.inline.hpp"
   47 #include "runtime/javaThread.hpp"
   48 #include "runtime/jniHandles.hpp"
   49 #include "runtime/objectMonitor.hpp"
   50 #include "runtime/os.hpp"
   51 #include "runtime/safepoint.hpp"
   52 #include "runtime/safepointMechanism.hpp"
   53 #include "runtime/sharedRuntime.hpp"

   54 #include "runtime/stubRoutines.hpp"
   55 #include "utilities/checkedCast.hpp"
   56 #include "utilities/macros.hpp"

   57 
   58 #ifdef PRODUCT
   59 #define BLOCK_COMMENT(str) /* nothing */
   60 #define STOP(error) stop(error)
   61 #else
   62 #define BLOCK_COMMENT(str) block_comment(str)
   63 #define STOP(error) block_comment(error); stop(error)
   64 #endif
   65 
   66 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
   67 
   68 #ifdef ASSERT
   69 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
   70 #endif
   71 
   72 static const Assembler::Condition reverse[] = {
   73     Assembler::noOverflow     /* overflow      = 0x0 */ ,
   74     Assembler::overflow       /* noOverflow    = 0x1 */ ,
   75     Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
   76     Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,

 1663 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1664   LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
 1665   LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
 1666   pass_arg2(this, arg_2);
 1667   pass_arg1(this, arg_1);
 1668   pass_arg0(this, arg_0);
 1669   call_VM_leaf(entry_point, 3);
 1670 }
 1671 
 1672 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
 1673   LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3));
 1674   LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
 1675   LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
 1676   pass_arg3(this, arg_3);
 1677   pass_arg2(this, arg_2);
 1678   pass_arg1(this, arg_1);
 1679   pass_arg0(this, arg_0);
 1680   call_VM_leaf(entry_point, 3);
 1681 }
 1682 

 1683 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
 1684   pass_arg0(this, arg_0);
 1685   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1686 }
 1687 
 1688 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
 1689   LP64_ONLY(assert_different_registers(arg_0, c_rarg1));
 1690   pass_arg1(this, arg_1);
 1691   pass_arg0(this, arg_0);
 1692   MacroAssembler::call_VM_leaf_base(entry_point, 2);
 1693 }
 1694 
 1695 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1696   LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
 1697   LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
 1698   pass_arg2(this, arg_2);
 1699   pass_arg1(this, arg_1);
 1700   pass_arg0(this, arg_0);
 1701   MacroAssembler::call_VM_leaf_base(entry_point, 3);
 1702 }
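
A note on ordering: the pass_argN calls run highest-argument-first so that moving one argument into its fixed ABI register never clobbers a register still holding a lower-numbered argument; the LP64 asserts above encode exactly that constraint. A minimal model of the hazard, with registers played by C++ references (names illustrative, not HotSpot's):

    // Sketch: had the caller kept arg_1 in c_rarg2, the first move would
    // destroy it before pass_arg1 could read it -- hence the asserts.
    static void pass_args_sketch(long& c_rarg1, long& c_rarg2,
                                 long arg_1, long arg_2) {
      c_rarg2 = arg_2;  // pass_arg2: must not overwrite a live arg_1 or arg_0
      c_rarg1 = arg_1;  // pass_arg1: must not overwrite a live arg_0
    }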

 2837     lea(rscratch, src);
 2838     Assembler::mulss(dst, Address(rscratch, 0));
 2839   }
 2840 }
 2841 
 2842 void MacroAssembler::null_check(Register reg, int offset) {
 2843   if (needs_explicit_null_check(offset)) {
 2844     // provoke OS null exception if reg is null by
 2845     // accessing M[reg] w/o changing any (non-CC) registers
 2846     // NOTE: cmpl is plenty here to provoke a segv
 2847     cmpptr(rax, Address(reg, 0));
 2848     // Note: should probably use testl(rax, Address(reg, 0));
 2849     //       may be shorter code (however, this version of
 2850     //       testl needs to be implemented first)
 2851   } else {
 2852     // nothing to do, (later) access of M[reg + offset]
 2853     // will provoke OS null exception if reg is null
 2854   }
 2855 }
 2856 
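For context, needs_explicit_null_check() is at heart a page-range test: a fault at M[reg + offset] is only guaranteed to hit the protected page at address zero when the offset is small and non-negative, so larger or negative offsets require the explicit cmpptr probe above. A rough sketch under that assumption (the real HotSpot predicate also covers compressed-oop and platform-specific cases):

    // Sketch only -- not the exact HotSpot predicate.
    static bool needs_explicit_null_check_sketch(long offset) {
      const long page_size = 4096;  // assumed; HotSpot asks os::vm_page_size()
      return offset < 0 || offset >= page_size;
    }
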

 2857 void MacroAssembler::os_breakpoint() {
  2858   // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
 2859   // (e.g., MSVC can't call ps() otherwise)
 2860   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 2861 }
 2862 
 2863 void MacroAssembler::unimplemented(const char* what) {
 2864   const char* buf = nullptr;
 2865   {
 2866     ResourceMark rm;
 2867     stringStream ss;
 2868     ss.print("unimplemented: %s", what);
 2869     buf = code_string(ss.as_string());
 2870   }
 2871   stop(buf);
 2872 }
 2873 
 2874 #ifdef _LP64
 2875 #define XSTATE_BV 0x200
 2876 #endif

 3941 }
 3942 
 3943 // C++ bool manipulation
 3944 void MacroAssembler::testbool(Register dst) {
 3945   if(sizeof(bool) == 1)
 3946     testb(dst, 0xff);
 3947   else if(sizeof(bool) == 2) {
 3948     // testw implementation needed for two byte bools
 3949     ShouldNotReachHere();
 3950   } else if(sizeof(bool) == 4)
 3951     testl(dst, dst);
 3952   else
 3953     // unsupported
 3954     ShouldNotReachHere();
 3955 }
 3956 
 3957 void MacroAssembler::testptr(Register dst, Register src) {
 3958   LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
 3959 }
 3960 

 3961 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
 3962 void MacroAssembler::tlab_allocate(Register thread, Register obj,
 3963                                    Register var_size_in_bytes,
 3964                                    int con_size_in_bytes,
 3965                                    Register t1,
 3966                                    Register t2,
 3967                                    Label& slow_case) {
 3968   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 3969   bs->tlab_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
 3970 }
 3971 
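The barrier-set call above ultimately emits the classic bump-the-pointer fast path. A minimal C++ sketch of that path, with an illustrative struct standing in for the real JavaThread TLAB fields:

    #include <cstddef>

    struct TlabSketch { char* top; char* end; };  // stand-in, not JavaThread's layout

    static char* tlab_allocate_sketch(TlabSketch* tlab, size_t size_in_bytes) {
      char* obj     = tlab->top;
      char* new_top = obj + size_in_bytes;
      if (new_top > tlab->end) return nullptr;  // would jump to slow_case
      tlab->top = new_top;                      // bump the allocation pointer
      return obj;                               // freshly claimed thread-local block
    }
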
 3972 RegSet MacroAssembler::call_clobbered_gp_registers() {
 3973   RegSet regs;
 3974 #ifdef _LP64
 3975   regs += RegSet::of(rax, rcx, rdx);
 3976 #ifndef WINDOWS
 3977   regs += RegSet::of(rsi, rdi);
 3978 #endif
 3979   regs += RegSet::range(r8, r11);
 3980 #else

 4193     // clear topmost word (no jump would be needed if conditional assignment worked here)
 4194     movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp);
 4195     // index could be 0 now, must check again
 4196     jcc(Assembler::zero, done);
 4197     bind(even);
 4198   }
 4199 #endif // !_LP64
 4200   // initialize remaining object fields: index is a multiple of 2 now
 4201   {
 4202     Label loop;
 4203     bind(loop);
 4204     movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
 4205     NOT_LP64(movptr(Address(address, index, Address::times_8, offset_in_bytes - 2*BytesPerWord), temp);)
 4206     decrement(index);
 4207     jcc(Assembler::notZero, loop);
 4208   }
 4209 
 4210   bind(done);
 4211 }
 4212 

 4213 // Look up the method for a megamorphic invokeinterface call.
 4214 // The target method is determined by <intf_klass, itable_index>.
 4215 // The receiver klass is in recv_klass.
 4216 // On success, the result will be in method_result, and execution falls through.
 4217 // On failure, execution transfers to the given label.
 4218 void MacroAssembler::lookup_interface_method(Register recv_klass,
 4219                                              Register intf_klass,
 4220                                              RegisterOrConstant itable_index,
 4221                                              Register method_result,
 4222                                              Register scan_temp,
 4223                                              Label& L_no_such_interface,
 4224                                              bool return_method) {
 4225   assert_different_registers(recv_klass, intf_klass, scan_temp);
 4226   assert_different_registers(method_result, intf_klass, scan_temp);
 4227   assert(recv_klass != method_result || !return_method,
 4228          "recv_klass can be destroyed when method isn't needed");
 4229 
 4230   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
 4231          "caller must use same register for non-constant itable index as for method");
 4232 

 4660   } else {
 4661     Label L;
 4662     jccb(negate_condition(cc), L);
 4663     movl(dst, src);
 4664     bind(L);
 4665   }
 4666 }
 4667 
 4668 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
 4669   if (VM_Version::supports_cmov()) {
 4670     cmovl(cc, dst, src);
 4671   } else {
 4672     Label L;
 4673     jccb(negate_condition(cc), L);
 4674     movl(dst, src);
 4675     bind(L);
 4676   }
 4677 }
 4678 
 4679 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
 4680   if (!VerifyOops) return;

 4681 
 4682   BLOCK_COMMENT("verify_oop {");
 4683 #ifdef _LP64
 4684   push(rscratch1);
 4685 #endif
 4686   push(rax);                          // save rax
 4687   push(reg);                          // pass register argument
 4688 
 4689   // Pass register number to verify_oop_subroutine
 4690   const char* b = nullptr;
 4691   {
 4692     ResourceMark rm;
 4693     stringStream ss;
 4694     ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
 4695     b = code_string(ss.as_string());
 4696   }
 4697   ExternalAddress buffer((address) b);
 4698   pushptr(buffer.addr(), rscratch1);
 4699 
 4700   // call indirectly to solve generation ordering problem

 4722   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
 4723   int stackElementSize = Interpreter::stackElementSize;
 4724   int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
 4725 #ifdef ASSERT
 4726   int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
 4727   assert(offset1 - offset == stackElementSize, "correct arithmetic");
 4728 #endif
 4729   Register             scale_reg    = noreg;
 4730   Address::ScaleFactor scale_factor = Address::no_scale;
 4731   if (arg_slot.is_constant()) {
 4732     offset += arg_slot.as_constant() * stackElementSize;
 4733   } else {
 4734     scale_reg    = arg_slot.as_register();
 4735     scale_factor = Address::times(stackElementSize);
 4736   }
 4737   offset += wordSize;           // return PC is on stack
 4738   return Address(rsp, scale_reg, scale_factor, offset);
 4739 }
 4740 
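In scalar terms the returned operand is rsp + arg_slot * stackElementSize + expr_offset + wordSize, the trailing wordSize skipping the return PC on top of the stack. The same arithmetic for the constant-slot case, sketched:

    static long argument_offset_sketch(long rsp, int arg_slot,
                                       int stack_element_size, int expr_offset) {
      return rsp + expr_offset + (long)arg_slot * stack_element_size
                 + (long)sizeof(void*);  // skip the return PC
    }
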
 4741 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
 4742   if (!VerifyOops) return;

 4743 
 4744 #ifdef _LP64
 4745   push(rscratch1);
 4746 #endif
 4747   push(rax); // save rax,
 4748   // addr may contain rsp so we will have to adjust it based on the push
 4749   // we just did (and on 64 bit we do two pushes)
 4750   // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which
 4751   // stores rax into addr which is backwards of what was intended.
 4752   if (addr.uses(rsp)) {
 4753     lea(rax, addr);
 4754     pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
 4755   } else {
 4756     pushptr(addr);
 4757   }
 4758 
 4759   // Pass register number to verify_oop_subroutine
 4760   const char* b = nullptr;
 4761   {
 4762     ResourceMark rm;

 5209 
 5210 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
 5211   // get mirror
 5212   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 5213   load_method_holder(mirror, method);
 5214   movptr(mirror, Address(mirror, mirror_offset));
 5215   resolve_oop_handle(mirror, tmp);
 5216 }
 5217 
 5218 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
 5219   load_method_holder(rresult, rmethod);
 5220   movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
 5221 }
 5222 
 5223 void MacroAssembler::load_method_holder(Register holder, Register method) {
 5224   movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
 5225   movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
 5226   movptr(holder, Address(holder, ConstantPool::pool_holder_offset()));          // InstanceKlass*
 5227 }
 5228 
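The three movptr loads in load_method_holder walk Method* -> ConstMethod* -> ConstantPool* -> InstanceKlass*. The same chase written in C++ with HotSpot's metadata accessors, purely to document what the sequence traverses:

    static InstanceKlass* holder_of_sketch(Method* method) {
      return method->constMethod()->constants()->pool_holder();
    }
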

 5229 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
 5230   assert_different_registers(src, tmp);
 5231   assert_different_registers(dst, tmp);
 5232 #ifdef _LP64
 5233   if (UseCompressedClassPointers) {
 5234     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5235     decode_klass_not_null(dst, tmp);
 5236   } else
 5237 #endif
 5238     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));

 5239 }
 5240 
 5241 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
 5242   assert_different_registers(src, tmp);
 5243   assert_different_registers(dst, tmp);
 5244 #ifdef _LP64
 5245   if (UseCompressedClassPointers) {
 5246     encode_klass_not_null(src, tmp);
 5247     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5248   } else
 5249 #endif
 5250     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5251 }
 5252 
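With UseCompressedClassPointers the klass word is a 32-bit narrow value, and decode_klass_not_null rebuilds the full pointer as base + (narrow << shift). A sketch of that decode -- base and shift are parameters here, while the real values come from the compressed-klass encoding:

    #include <cstdint>

    static void* decode_klass_sketch(uint32_t narrow_klass,
                                     uintptr_t base, int shift) {
      return (void*)(base + ((uintptr_t)narrow_klass << shift));
    }
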
 5253 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
 5254                                     Register tmp1, Register thread_tmp) {
 5255   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5256   decorators = AccessInternal::decorator_fixup(decorators, type);
 5257   bool as_raw = (decorators & AS_RAW) != 0;
 5258   if (as_raw) {
 5259     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
 5260   } else {
 5261     bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
 5262   }
 5263 }
 5264 
 5265 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
 5266                                      Register tmp1, Register tmp2, Register tmp3) {
 5267   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5268   decorators = AccessInternal::decorator_fixup(decorators, type);
 5269   bool as_raw = (decorators & AS_RAW) != 0;
 5270   if (as_raw) {
 5271     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5272   } else {
 5273     bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5274   }
 5275 }
 5276 

 5277 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
 5278                                    Register thread_tmp, DecoratorSet decorators) {
 5279   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
 5280 }
 5281 
 5282 // Doesn't do verification, generates fixed size code
 5283 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
 5284                                             Register thread_tmp, DecoratorSet decorators) {
 5285   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
 5286 }
 5287 
 5288 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
 5289                                     Register tmp2, Register tmp3, DecoratorSet decorators) {
 5290   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
 5291 }
 5292 
 5293 // Used for storing nulls.
 5294 void MacroAssembler::store_heap_oop_null(Address dst) {
 5295   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
 5296 }
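
These wrappers simply pin T_OBJECT and IN_HEAP onto the access_* entry points. A typical call-site shape, with a hypothetical field_offset and the usual HotSpot masm shorthand:

    // Illustrative only; registers and offset are not taken from this file.
    //   __ load_heap_oop(rax, Address(rbx, field_offset), rscratch1);
    //   __ store_heap_oop_null(Address(rax, field_offset));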

 5596 
 5597 void MacroAssembler::reinit_heapbase() {
 5598   if (UseCompressedOops) {
 5599     if (Universe::heap() != nullptr) {
 5600       if (CompressedOops::base() == nullptr) {
 5601         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
 5602       } else {
 5603         mov64(r12_heapbase, (int64_t)CompressedOops::ptrs_base());
 5604       }
 5605     } else {
 5606       movptr(r12_heapbase, ExternalAddress(CompressedOops::ptrs_base_addr()));
 5607     }
 5608   }
 5609 }
 5610 
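r12_heapbase caches CompressedOops::base() so that decoding a narrow oop costs a single lea off r12. The arithmetic it enables, sketched with an assumed shift of 3 (HotSpot actually reads CompressedOops::shift()):

    #include <cstdint>

    static uintptr_t decode_oop_sketch(uint32_t narrow_oop, uintptr_t heap_base) {
      return heap_base + ((uintptr_t)narrow_oop << 3);
    }
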
 5611 #endif // _LP64
 5612 
 5613 #if COMPILER2_OR_JVMCI
 5614 
 5615 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers
 5616 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 5617   // cnt - number of qwords (8-byte words).
 5618   // base - start address, qword aligned.
 5619   Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
 5620   bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
 5621   if (use64byteVector) {
 5622     vpxor(xtmp, xtmp, xtmp, AVX_512bit);
 5623   } else if (MaxVectorSize >= 32) {
 5624     vpxor(xtmp, xtmp, xtmp, AVX_256bit);


 5625   } else {
 5626     pxor(xtmp, xtmp);

 5627   }
 5628   jmp(L_zero_64_bytes);
 5629 
 5630   BIND(L_loop);
 5631   if (MaxVectorSize >= 32) {
 5632     fill64(base, 0, xtmp, use64byteVector);
 5633   } else {
 5634     movdqu(Address(base,  0), xtmp);
 5635     movdqu(Address(base, 16), xtmp);
 5636     movdqu(Address(base, 32), xtmp);
 5637     movdqu(Address(base, 48), xtmp);
 5638   }
 5639   addptr(base, 64);
 5640 
 5641   BIND(L_zero_64_bytes);
 5642   subptr(cnt, 8);
 5643   jccb(Assembler::greaterEqual, L_loop);
 5644 
 5645   // Copy trailing 64 bytes
 5646   if (use64byteVector) {
 5647     addptr(cnt, 8);
 5648     jccb(Assembler::equal, L_end);
 5649     fill64_masked(3, base, 0, xtmp, mask, cnt, rtmp, true);
 5650     jmp(L_end);
 5651   } else {
 5652     addptr(cnt, 4);
 5653     jccb(Assembler::less, L_tail);
 5654     if (MaxVectorSize >= 32) {
 5655       vmovdqu(Address(base, 0), xtmp);
 5656     } else {
 5657       movdqu(Address(base,  0), xtmp);
 5658       movdqu(Address(base, 16), xtmp);
 5659     }
 5660   }
 5661   addptr(base, 32);
 5662   subptr(cnt, 4);
 5663 
 5664   BIND(L_tail);
 5665   addptr(cnt, 4);
 5666   jccb(Assembler::lessEqual, L_end);
 5667   if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
 5668     fill32_masked(3, base, 0, xtmp, mask, cnt, rtmp);
 5669   } else {
 5670     decrement(cnt);
 5671 
 5672     BIND(L_sloop);
 5673     movq(Address(base, 0), xtmp);
 5674     addptr(base, 8);
 5675     decrement(cnt);
 5676     jccb(Assembler::greaterEqual, L_sloop);
 5677   }
 5678   BIND(L_end);
 5679 }
 5680 
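The schedule above is: a main loop clearing 64 bytes per iteration, one optional 32-byte chunk, then single qwords (or a masked vector store on AVX-512). The equivalent byte accounting in plain C++, with memset standing in for the vector stores:

    #include <cstdint>
    #include <cstring>

    static void xmm_clear_mem_sketch(uint64_t* base, long cnt /* qwords */) {
      while (cnt >= 8) { memset(base, 0, 64); base += 8; cnt -= 8; }  // L_loop
      if (cnt >= 4)    { memset(base, 0, 32); base += 4; cnt -= 4; }  // 32B chunk
      while (cnt-- > 0) { *base++ = 0; }                              // L_sloop
    }
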

 5681 // Clearing constant sized memory using YMM/ZMM registers.
 5682 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 5683   assert(UseAVX > 2 && VM_Version::supports_avx512vlbw(), "");
 5684   bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
 5685 
 5686   int vector64_count = (cnt & (~0x7)) >> 3;
 5687   cnt = cnt & 0x7;
 5688   const int fill64_per_loop = 4;
 5689   const int max_unrolled_fill64 = 8;
 5690 
 5691   // 64 byte initialization loop.
 5692   vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit);
 5693   int start64 = 0;
 5694   if (vector64_count > max_unrolled_fill64) {
 5695     Label LOOP;
 5696     Register index = rtmp;
 5697 
 5698     start64 = vector64_count - (vector64_count % fill64_per_loop);
 5699 
 5700     movl(index, 0);

 5750         break;
 5751       case 7:
 5752         if (use64byteVector) {
 5753           movl(rtmp, 0x7F);
 5754           kmovwl(mask, rtmp);
 5755           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 5756         } else {
 5757           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 5758           movl(rtmp, 0x7);
 5759           kmovwl(mask, rtmp);
 5760           evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
 5761         }
 5762         break;
 5763       default:
  5764         fatal("Unexpected length: %d\n", cnt);
 5765         break;
 5766     }
 5767   }
 5768 }
 5769 
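The case-7 tail above illustrates the masking idea: leftover qwords become one predicated vector store (mask 0x7F enables 7 of 8 qword lanes at 512 bits; 0x7 enables 3 of 4 at 256 bits) instead of a scalar loop. Lane semantics, sketched:

    #include <cstdint>

    // Only lanes enabled by the k-mask are written; the rest stay untouched.
    static void masked_store_sketch(uint64_t* dst, const uint64_t* src,
                                    unsigned lane_mask, int lanes) {
      for (int i = 0; i < lanes; i++)
        if (lane_mask & (1u << i)) dst[i] = src[i];
    }
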
 5770 void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp, XMMRegister xtmp,
 5771                                bool is_large, KRegister mask) {
 5772   // cnt      - number of qwords (8-byte words).
 5773   // base     - start address, qword aligned.
 5774   // is_large - if optimizers know cnt is larger than InitArrayShortSize
 5775   assert(base==rdi, "base register must be edi for rep stos");
 5776   assert(tmp==rax,   "tmp register must be eax for rep stos");
 5777   assert(cnt==rcx,   "cnt register must be ecx for rep stos");
 5778   assert(InitArrayShortSize % BytesPerLong == 0,
 5779     "InitArrayShortSize should be the multiple of BytesPerLong");
 5780 
 5781   Label DONE;
 5782   if (!is_large || !UseXMMForObjInit) {
 5783     xorptr(tmp, tmp);
 5784   }
 5785 
 5786   if (!is_large) {
 5787     Label LOOP, LONG;
 5788     cmpptr(cnt, InitArrayShortSize/BytesPerLong);
 5789     jccb(Assembler::greater, LONG);
 5790 
 5791     NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
 5792 
 5793     decrement(cnt);
 5794     jccb(Assembler::negative, DONE); // Zero length
 5795 
 5796     // Use individual pointer-sized stores for small counts:
 5797     BIND(LOOP);
 5798     movptr(Address(base, cnt, Address::times_ptr), tmp);
 5799     decrement(cnt);
 5800     jccb(Assembler::greaterEqual, LOOP);
 5801     jmpb(DONE);
 5802 
 5803     BIND(LONG);
 5804   }
 5805 
 5806   // Use longer rep-prefixed ops for non-small counts:
 5807   if (UseFastStosb) {
 5808     shlptr(cnt, 3); // convert to number of bytes
 5809     rep_stosb();
 5810   } else if (UseXMMForObjInit) {
 5811     xmm_clear_mem(base, cnt, tmp, xtmp, mask);
 5812   } else {
 5813     NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
 5814     rep_stos();
 5815   }
 5816 
 5817   BIND(DONE);
 5818 }
 5819 
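Restating the strategy selection: counts at or below InitArrayShortSize take the unrolled pointer-store loop (avoiding rep startup cost), while larger counts use rep stosb, the XMM path, or rep stos. A compact C++ model of the split, with memset standing in for all three long-count variants:

    #include <cstdint>
    #include <cstring>

    static void clear_mem_sketch(uint64_t* base, size_t cnt /* qwords */,
                                 size_t short_qwords /* InitArrayShortSize/8 */) {
      if (cnt <= short_qwords) {
        for (size_t i = 0; i < cnt; i++) base[i] = 0;  // short: plain store loop
      } else {
        memset(base, 0, cnt * 8);  // long: rep stosb / XMM / rep stos
      }
    }
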
 5820 #endif //COMPILER2_OR_JVMCI
 5821 
 5822 
 5823 void MacroAssembler::generate_fill(BasicType t, bool aligned,
 5824                                    Register to, Register value, Register count,
 5825                                    Register rtmp, XMMRegister xtmp) {
 5826   ShortBranchVerifier sbv(this);
 5827   assert_different_registers(to, value, count, rtmp);
 5828   Label L_exit;
 5829   Label L_fill_2_bytes, L_fill_4_bytes;
 5830 
 5831 #if defined(COMPILER2) && defined(_LP64)

   10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   12  * version 2 for more details (a copy is included in the LICENSE file that
   13  * accompanied this code).
   14  *
   15  * You should have received a copy of the GNU General Public License version
   16  * 2 along with this work; if not, write to the Free Software Foundation,
   17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   18  *
   19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   20  * or visit www.oracle.com if you need additional information or have any
   21  * questions.
   22  *
   23  */
   24 
   25 #include "precompiled.hpp"
   26 #include "asm/assembler.hpp"
   27 #include "asm/assembler.inline.hpp"
   28 #include "compiler/compiler_globals.hpp"
   29 #include "compiler/disassembler.hpp"
   30 #include "ci/ciInlineKlass.hpp"
   31 #include "crc32c.h"
   32 #include "gc/shared/barrierSet.hpp"
   33 #include "gc/shared/barrierSetAssembler.hpp"
   34 #include "gc/shared/collectedHeap.inline.hpp"
   35 #include "gc/shared/tlab_globals.hpp"
   36 #include "interpreter/bytecodeHistogram.hpp"
   37 #include "interpreter/interpreter.hpp"
   38 #include "jvm.h"
   39 #include "memory/resourceArea.hpp"
   40 #include "memory/universe.hpp"
   41 #include "oops/accessDecorators.hpp"
   42 #include "oops/compressedKlass.inline.hpp"
   43 #include "oops/compressedOops.inline.hpp"
   44 #include "oops/klass.inline.hpp"
   45 #include "oops/resolvedFieldEntry.hpp"
   46 #include "prims/methodHandles.hpp"
   47 #include "runtime/continuation.hpp"
   48 #include "runtime/interfaceSupport.inline.hpp"
   49 #include "runtime/javaThread.hpp"
   50 #include "runtime/jniHandles.hpp"
   51 #include "runtime/objectMonitor.hpp"
   52 #include "runtime/os.hpp"
   53 #include "runtime/safepoint.hpp"
   54 #include "runtime/safepointMechanism.hpp"
   55 #include "runtime/sharedRuntime.hpp"
   56 #include "runtime/signature_cc.hpp"
   57 #include "runtime/stubRoutines.hpp"
   58 #include "utilities/checkedCast.hpp"
   59 #include "utilities/macros.hpp"
   60 #include "vmreg_x86.inline.hpp"
   61 #ifdef COMPILER2
   62 #include "opto/output.hpp"
   63 #endif
   64 
   65 #ifdef PRODUCT
   66 #define BLOCK_COMMENT(str) /* nothing */
   67 #define STOP(error) stop(error)
   68 #else
   69 #define BLOCK_COMMENT(str) block_comment(str)
   70 #define STOP(error) block_comment(error); stop(error)
   71 #endif
   72 
   73 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
   74 
   75 #ifdef ASSERT
   76 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
   77 #endif
   78 
   79 static const Assembler::Condition reverse[] = {
   80     Assembler::noOverflow     /* overflow      = 0x0 */ ,
   81     Assembler::overflow       /* noOverflow    = 0x1 */ ,
   82     Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
   83     Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,

 1670 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1671   LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
 1672   LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
 1673   pass_arg2(this, arg_2);
 1674   pass_arg1(this, arg_1);
 1675   pass_arg0(this, arg_0);
 1676   call_VM_leaf(entry_point, 3);
 1677 }
 1678 
 1679 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
 1680   LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3));
 1681   LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
 1682   LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
 1683   pass_arg3(this, arg_3);
 1684   pass_arg2(this, arg_2);
 1685   pass_arg1(this, arg_1);
 1686   pass_arg0(this, arg_0);
 1687   call_VM_leaf(entry_point, 3);
 1688 }
 1689 
 1690 void MacroAssembler::super_call_VM_leaf(address entry_point) {
 1691   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1692 }
 1693 
 1694 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
 1695   pass_arg0(this, arg_0);
 1696   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1697 }
 1698 
 1699 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
 1700   LP64_ONLY(assert_different_registers(arg_0, c_rarg1));
 1701   pass_arg1(this, arg_1);
 1702   pass_arg0(this, arg_0);
 1703   MacroAssembler::call_VM_leaf_base(entry_point, 2);
 1704 }
 1705 
 1706 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1707   LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
 1708   LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
 1709   pass_arg2(this, arg_2);
 1710   pass_arg1(this, arg_1);
 1711   pass_arg0(this, arg_0);
 1712   MacroAssembler::call_VM_leaf_base(entry_point, 3);
 1713 }

 2848     lea(rscratch, src);
 2849     Assembler::mulss(dst, Address(rscratch, 0));
 2850   }
 2851 }
 2852 
 2853 void MacroAssembler::null_check(Register reg, int offset) {
 2854   if (needs_explicit_null_check(offset)) {
 2855     // provoke OS null exception if reg is null by
 2856     // accessing M[reg] w/o changing any (non-CC) registers
 2857     // NOTE: cmpl is plenty here to provoke a segv
 2858     cmpptr(rax, Address(reg, 0));
 2859     // Note: should probably use testl(rax, Address(reg, 0));
 2860     //       may be shorter code (however, this version of
 2861     //       testl needs to be implemented first)
 2862   } else {
 2863     // nothing to do, (later) access of M[reg + offset]
 2864     // will provoke OS null exception if reg is null
 2865   }
 2866 }
 2867 
 2868 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
 2869   andptr(markword, markWord::inline_type_mask_in_place);
 2870   cmpptr(markword, markWord::inline_type_pattern);
 2871   jcc(Assembler::equal, is_inline_type);
 2872 }
 2873 
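test_markword_is_inline_type is a mask-then-compare against the fixed inline-type bit pattern in the mark word. The predicate it implements, as a one-line sketch:

    #include <cstdint>

    static bool is_inline_type_mark_sketch(uintptr_t mark,
                                           uintptr_t mask, uintptr_t pattern) {
      return (mark & mask) == pattern;  // andptr + cmpptr + jcc(equal)
    }
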
 2874 void MacroAssembler::test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type) {
 2875   movl(temp_reg, Address(klass, Klass::access_flags_offset()));
 2876   testl(temp_reg, JVM_ACC_VALUE);
 2877   jcc(Assembler::notZero, is_inline_type);
 2878 }
 2879 
 2880 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type) {
 2881   testptr(object, object);
 2882   jcc(Assembler::zero, not_inline_type);
 2883   const int is_inline_type_mask = markWord::inline_type_pattern;
 2884   movptr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
 2885   andptr(tmp, is_inline_type_mask);
 2886   cmpptr(tmp, is_inline_type_mask);
 2887   jcc(Assembler::notEqual, not_inline_type);
 2888 }
 2889 
 2890 void MacroAssembler::test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type) {
 2891 #ifdef ASSERT
 2892   {
 2893     Label done_check;
 2894     test_klass_is_inline_type(klass, temp_reg, done_check);
 2895     stop("test_klass_is_empty_inline_type with non inline type klass");
 2896     bind(done_check);
 2897   }
 2898 #endif
 2899   movl(temp_reg, Address(klass, InstanceKlass::misc_flags_offset()));
 2900   testl(temp_reg, InstanceKlassFlags::is_empty_inline_type_value());
 2901   jcc(Assembler::notZero, is_empty_inline_type);
 2902 }
 2903 
 2904 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) {
 2905   movl(temp_reg, flags);
 2906   shrl(temp_reg, ResolvedFieldEntry::is_null_free_inline_type_shift);
 2907   andl(temp_reg, 0x1);
 2908   testl(temp_reg, temp_reg);
 2909   jcc(Assembler::notZero, is_null_free_inline_type);
 2910 }
 2911 
 2912 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) {
 2913   movl(temp_reg, flags);
 2914   shrl(temp_reg, ResolvedFieldEntry::is_null_free_inline_type_shift);
 2915   andl(temp_reg, 0x1);
 2916   testl(temp_reg, temp_reg);
 2917   jcc(Assembler::zero, not_null_free_inline_type);
 2918 }
 2919 
 2920 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) {
 2921   movl(temp_reg, flags);
 2922   shrl(temp_reg, ResolvedFieldEntry::is_flat_shift);
 2923   andl(temp_reg, 0x1);
 2924   testl(temp_reg, temp_reg);
 2925   jcc(Assembler::notZero, is_flat);
 2926 }
 2927 
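The three test_field_* helpers above all reduce to probing a single bit of the ResolvedFieldEntry flags word (shift right, mask to bit 0, test). The shared predicate, sketched:

    #include <cstdint>

    static bool field_flag_sketch(uint32_t flags, int shift) {
      return ((flags >> shift) & 0x1u) != 0;  // shrl + andl + testl
    }
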
 2928 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
 2929   Label test_mark_word;
 2930   // load mark word
 2931   movptr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
 2932   // check displaced
 2933   testl(temp_reg, markWord::unlocked_value);
 2934   jccb(Assembler::notZero, test_mark_word);
 2935   // slow path use klass prototype
 2936   push(rscratch1);
 2937   load_prototype_header(temp_reg, oop, rscratch1);
 2938   pop(rscratch1);
 2939 
 2940   bind(test_mark_word);
 2941   testl(temp_reg, test_bit);
 2942   jcc((jmp_set) ? Assembler::notZero : Assembler::zero, jmp_label);
 2943 }
 2944 
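test_oop_prototype_bit splits on lock state: an unlocked mark word already carries the prototype bits, otherwise they are reloaded from the klass prototype header before the bit test. Its decision logic, sketched:

    #include <cstdint>

    static bool oop_prototype_bit_sketch(uintptr_t mark, uintptr_t klass_prototype,
                                         uintptr_t unlocked_value, uint32_t test_bit) {
      uintptr_t bits = (mark & unlocked_value) ? mark : klass_prototype;
      return (bits & test_bit) != 0;
    }
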
 2945 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg,
 2946                                          Label& is_flat_array) {
 2947 #ifdef _LP64
 2948   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array);
 2949 #else
 2950   load_klass(temp_reg, oop, noreg);
 2951   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 2952   test_flat_array_layout(temp_reg, is_flat_array);
 2953 #endif
 2954 }
 2955 
 2956 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg,
 2957                                              Label& is_non_flat_array) {
 2958 #ifdef _LP64
 2959   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array);
 2960 #else
 2961   load_klass(temp_reg, oop, noreg);
 2962   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 2963   test_non_flat_array_layout(temp_reg, is_non_flat_array);
 2964 #endif
 2965 }
 2966 
  2967 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array) {
 2968 #ifdef _LP64
 2969   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
 2970 #else
 2971   load_klass(temp_reg, oop, noreg);
 2972   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 2973   test_null_free_array_layout(temp_reg, is_null_free_array);
 2974 #endif
 2975 }
 2976 
  2977 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array) {
 2978 #ifdef _LP64
 2979   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
 2980 #else
 2981   load_klass(temp_reg, oop, noreg);
 2982   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 2983   test_non_null_free_array_layout(temp_reg, is_non_null_free_array);
 2984 #endif
 2985 }
 2986 
 2987 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) {
 2988   testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
 2989   jcc(Assembler::notZero, is_flat_array);
 2990 }
 2991 
 2992 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) {
 2993   testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
 2994   jcc(Assembler::zero, is_non_flat_array);
 2995 }
 2996 
 2997 void MacroAssembler::test_null_free_array_layout(Register lh, Label& is_null_free_array) {
 2998   testl(lh, Klass::_lh_null_free_array_bit_inplace);
 2999   jcc(Assembler::notZero, is_null_free_array);
 3000 }
 3001 
 3002 void MacroAssembler::test_non_null_free_array_layout(Register lh, Label& is_non_null_free_array) {
 3003   testl(lh, Klass::_lh_null_free_array_bit_inplace);
 3004   jcc(Assembler::zero, is_non_null_free_array);
 3005 }
 3006 
 3007 
 3008 void MacroAssembler::os_breakpoint() {
  3009   // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
 3010   // (e.g., MSVC can't call ps() otherwise)
 3011   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 3012 }
 3013 
 3014 void MacroAssembler::unimplemented(const char* what) {
 3015   const char* buf = nullptr;
 3016   {
 3017     ResourceMark rm;
 3018     stringStream ss;
 3019     ss.print("unimplemented: %s", what);
 3020     buf = code_string(ss.as_string());
 3021   }
 3022   stop(buf);
 3023 }
 3024 
 3025 #ifdef _LP64
 3026 #define XSTATE_BV 0x200
 3027 #endif

 4092 }
 4093 
 4094 // C++ bool manipulation
 4095 void MacroAssembler::testbool(Register dst) {
 4096   if(sizeof(bool) == 1)
 4097     testb(dst, 0xff);
 4098   else if(sizeof(bool) == 2) {
 4099     // testw implementation needed for two byte bools
 4100     ShouldNotReachHere();
 4101   } else if(sizeof(bool) == 4)
 4102     testl(dst, dst);
 4103   else
 4104     // unsupported
 4105     ShouldNotReachHere();
 4106 }
 4107 
 4108 void MacroAssembler::testptr(Register dst, Register src) {
 4109   LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
 4110 }
 4111 
 4112 // Object / value buffer allocation...
 4113 //
 4114 // Kills klass and rsi on LP64
 4115 void MacroAssembler::allocate_instance(Register klass, Register new_obj,
 4116                                        Register t1, Register t2,
 4117                                        bool clear_fields, Label& alloc_failed)
 4118 {
 4119   Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop;
 4120   Register layout_size = t1;
 4121   assert(new_obj == rax, "needs to be rax");
 4122   assert_different_registers(klass, new_obj, t1, t2);
 4123 
 4124   // get instance_size in InstanceKlass (scaled to a count of bytes)
 4125   movl(layout_size, Address(klass, Klass::layout_helper_offset()));
 4126   // test to see if it has a finalizer or is malformed in some way
 4127   testl(layout_size, Klass::_lh_instance_slow_path_bit);
 4128   jcc(Assembler::notZero, slow_case_no_pop);
 4129 
 4130   // Allocate the instance:
 4131   //  If TLAB is enabled:
 4132   //    Try to allocate in the TLAB.
 4133   //    If fails, go to the slow path.
 4134   //  Else If inline contiguous allocations are enabled:
 4135   //    Try to allocate in eden.
 4136   //    If fails due to heap end, go to slow path.
 4137   //
 4138   //  If TLAB is enabled OR inline contiguous is enabled:
 4139   //    Initialize the allocation.
 4140   //    Exit.
 4141   //
 4142   //  Go to slow path.
 4143 
 4144   push(klass);
 4145   const Register thread = LP64_ONLY(r15_thread) NOT_LP64(klass);
 4146 #ifndef _LP64
 4147   if (UseTLAB) {
 4148     get_thread(thread);
 4149   }
 4150 #endif // _LP64
 4151 
 4152   if (UseTLAB) {
 4153     tlab_allocate(thread, new_obj, layout_size, 0, klass, t2, slow_case);
 4154     if (ZeroTLAB || (!clear_fields)) {
 4155       // the fields have been already cleared
 4156       jmp(initialize_header);
 4157     } else {
 4158       // initialize both the header and fields
 4159       jmp(initialize_object);
 4160     }
 4161   } else {
 4162     jmp(slow_case);
 4163   }
 4164 
  4165   // If UseTLAB is true, the object was created above and still needs to be initialized.
 4166   // Otherwise, skip and go to the slow path.
 4167   if (UseTLAB) {
 4168     if (clear_fields) {
 4169       // The object is initialized before the header.  If the object size is
 4170       // zero, go directly to the header initialization.
 4171       bind(initialize_object);
 4172       decrement(layout_size, sizeof(oopDesc));
 4173       jcc(Assembler::zero, initialize_header);
 4174 
 4175       // Initialize topmost object field, divide size by 8, check if odd and
 4176       // test if zero.
 4177       Register zero = klass;
 4178       xorl(zero, zero);    // use zero reg to clear memory (shorter code)
 4179       shrl(layout_size, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
 4180 
 4181   #ifdef ASSERT
 4182       // make sure instance_size was multiple of 8
 4183       Label L;
 4184       // Ignore partial flag stall after shrl() since it is debug VM
 4185       jcc(Assembler::carryClear, L);
 4186       stop("object size is not multiple of 2 - adjust this code");
 4187       bind(L);
 4188       // must be > 0, no extra check needed here
 4189   #endif
 4190 
 4191       // initialize remaining object fields: instance_size was a multiple of 8
 4192       {
 4193         Label loop;
 4194         bind(loop);
 4195         movptr(Address(new_obj, layout_size, Address::times_8, sizeof(oopDesc) - 1*oopSize), zero);
 4196         NOT_LP64(movptr(Address(new_obj, layout_size, Address::times_8, sizeof(oopDesc) - 2*oopSize), zero));
 4197         decrement(layout_size);
 4198         jcc(Assembler::notZero, loop);
 4199       }
 4200     } // clear_fields
 4201 
 4202     // initialize object header only.
 4203     bind(initialize_header);
 4204     pop(klass);
 4205     Register mark_word = t2;
 4206     movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
 4207     movptr(Address(new_obj, oopDesc::mark_offset_in_bytes ()), mark_word);
 4208 #ifdef _LP64
 4209     xorl(rsi, rsi);                 // use zero reg to clear memory (shorter code)
 4210     store_klass_gap(new_obj, rsi);  // zero klass gap for compressed oops
 4211 #endif
 4212     movptr(t2, klass);         // preserve klass
 4213     store_klass(new_obj, t2, rscratch1);  // src klass reg is potentially compressed
 4214 
 4215     jmp(done);
 4216   }
 4217 
 4218   bind(slow_case);
 4219   pop(klass);
 4220   bind(slow_case_no_pop);
 4221   jmp(alloc_failed);
 4222 
 4223   bind(done);
 4224 }
 4225 
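Condensed, the fast path allocate_instance emits is: check the layout_helper slow-path bit, TLAB-allocate, optionally zero the field area, then write the header last. A comment-level model (a sketch of the flow, not the emitted code):

    //   size = klass->layout_helper();          // slow-path bit set => slow_case
    //   obj  = tlab_allocate(size);             // bump pointer, or slow_case
    //   if (clear_fields && !ZeroTLAB)
    //     zero qwords in [obj + sizeof(oopDesc), obj + size);
    //   obj->mark  = klass->prototype_header(); // header written after fields
    //   obj->klass = klass;                     // compressed + klass gap on LP64
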
 4226 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
 4227 void MacroAssembler::tlab_allocate(Register thread, Register obj,
 4228                                    Register var_size_in_bytes,
 4229                                    int con_size_in_bytes,
 4230                                    Register t1,
 4231                                    Register t2,
 4232                                    Label& slow_case) {
 4233   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 4234   bs->tlab_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
 4235 }
 4236 
 4237 RegSet MacroAssembler::call_clobbered_gp_registers() {
 4238   RegSet regs;
 4239 #ifdef _LP64
 4240   regs += RegSet::of(rax, rcx, rdx);
 4241 #ifndef WINDOWS
 4242   regs += RegSet::of(rsi, rdi);
 4243 #endif
 4244   regs += RegSet::range(r8, r11);
 4245 #else

 4458     // clear topmost word (no jump would be needed if conditional assignment worked here)
 4459     movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp);
 4460     // index could be 0 now, must check again
 4461     jcc(Assembler::zero, done);
 4462     bind(even);
 4463   }
 4464 #endif // !_LP64
 4465   // initialize remaining object fields: index is a multiple of 2 now
 4466   {
 4467     Label loop;
 4468     bind(loop);
 4469     movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
 4470     NOT_LP64(movptr(Address(address, index, Address::times_8, offset_in_bytes - 2*BytesPerWord), temp);)
 4471     decrement(index);
 4472     jcc(Assembler::notZero, loop);
 4473   }
 4474 
 4475   bind(done);
 4476 }
 4477 
 4478 void MacroAssembler::get_inline_type_field_klass(Register klass, Register index, Register inline_klass) {
 4479   movptr(inline_klass, Address(klass, InstanceKlass::inline_type_field_klasses_offset()));
 4480 #ifdef ASSERT
 4481   {
 4482     Label done;
 4483     cmpptr(inline_klass, 0);
 4484     jcc(Assembler::notEqual, done);
 4485     stop("get_inline_type_field_klass contains no inline klass");
 4486     bind(done);
 4487   }
 4488 #endif
 4489   movptr(inline_klass, Address(inline_klass, index, Address::times_ptr));
 4490 }
 4491 
 4492 void MacroAssembler::get_default_value_oop(Register inline_klass, Register temp_reg, Register obj) {
 4493 #ifdef ASSERT
 4494   {
 4495     Label done_check;
 4496     test_klass_is_inline_type(inline_klass, temp_reg, done_check);
 4497     stop("get_default_value_oop from non inline type klass");
 4498     bind(done_check);
 4499   }
 4500 #endif
 4501   Register offset = temp_reg;
 4502   // Getting the offset of the pre-allocated default value
 4503   movptr(offset, Address(inline_klass, in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset())));
 4504   movl(offset, Address(offset, in_bytes(InlineKlass::default_value_offset_offset())));
 4505 
 4506   // Getting the mirror
 4507   movptr(obj, Address(inline_klass, in_bytes(Klass::java_mirror_offset())));
 4508   resolve_oop_handle(obj, inline_klass);
 4509 
 4510   // Getting the pre-allocated default value from the mirror
 4511   Address field(obj, offset, Address::times_1);
 4512   load_heap_oop(obj, field);
 4513 }
 4514 
 4515 void MacroAssembler::get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj) {
 4516 #ifdef ASSERT
 4517   {
 4518     Label done_check;
 4519     test_klass_is_empty_inline_type(inline_klass, temp_reg, done_check);
 4520     stop("get_empty_value from non-empty inline klass");
 4521     bind(done_check);
 4522   }
 4523 #endif
 4524   get_default_value_oop(inline_klass, temp_reg, obj);
 4525 }
 4526 
 4527 
 4528 // Look up the method for a megamorphic invokeinterface call.
 4529 // The target method is determined by <intf_klass, itable_index>.
 4530 // The receiver klass is in recv_klass.
 4531 // On success, the result will be in method_result, and execution falls through.
 4532 // On failure, execution transfers to the given label.
 4533 void MacroAssembler::lookup_interface_method(Register recv_klass,
 4534                                              Register intf_klass,
 4535                                              RegisterOrConstant itable_index,
 4536                                              Register method_result,
 4537                                              Register scan_temp,
 4538                                              Label& L_no_such_interface,
 4539                                              bool return_method) {
 4540   assert_different_registers(recv_klass, intf_klass, scan_temp);
 4541   assert_different_registers(method_result, intf_klass, scan_temp);
 4542   assert(recv_klass != method_result || !return_method,
 4543          "recv_klass can be destroyed when method isn't needed");
 4544 
 4545   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
 4546          "caller must use same register for non-constant itable index as for method");
 4547 

 4975   } else {
 4976     Label L;
 4977     jccb(negate_condition(cc), L);
 4978     movl(dst, src);
 4979     bind(L);
 4980   }
 4981 }
 4982 
 4983 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
 4984   if (VM_Version::supports_cmov()) {
 4985     cmovl(cc, dst, src);
 4986   } else {
 4987     Label L;
 4988     jccb(negate_condition(cc), L);
 4989     movl(dst, src);
 4990     bind(L);
 4991   }
 4992 }
 4993 
 4994 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
 4995   if (!VerifyOops || VerifyAdapterSharing) {
 4996     // Below address of the code string confuses VerifyAdapterSharing
 4997     // because it may differ between otherwise equivalent adapters.
 4998     return;
 4999   }
 5000 
 5001   BLOCK_COMMENT("verify_oop {");
 5002 #ifdef _LP64
 5003   push(rscratch1);
 5004 #endif
 5005   push(rax);                          // save rax
 5006   push(reg);                          // pass register argument
 5007 
 5008   // Pass register number to verify_oop_subroutine
 5009   const char* b = nullptr;
 5010   {
 5011     ResourceMark rm;
 5012     stringStream ss;
 5013     ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
 5014     b = code_string(ss.as_string());
 5015   }
 5016   ExternalAddress buffer((address) b);
 5017   pushptr(buffer.addr(), rscratch1);
 5018 
 5019   // call indirectly to solve generation ordering problem

 5041   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
 5042   int stackElementSize = Interpreter::stackElementSize;
 5043   int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
 5044 #ifdef ASSERT
 5045   int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
 5046   assert(offset1 - offset == stackElementSize, "correct arithmetic");
 5047 #endif
 5048   Register             scale_reg    = noreg;
 5049   Address::ScaleFactor scale_factor = Address::no_scale;
 5050   if (arg_slot.is_constant()) {
 5051     offset += arg_slot.as_constant() * stackElementSize;
 5052   } else {
 5053     scale_reg    = arg_slot.as_register();
 5054     scale_factor = Address::times(stackElementSize);
 5055   }
 5056   offset += wordSize;           // return PC is on stack
 5057   return Address(rsp, scale_reg, scale_factor, offset);
 5058 }
 5059 
 5060 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
 5061   if (!VerifyOops || VerifyAdapterSharing) {
 5062     // Below address of the code string confuses VerifyAdapterSharing
 5063     // because it may differ between otherwise equivalent adapters.
 5064     return;
 5065   }
 5066 
 5067 #ifdef _LP64
 5068   push(rscratch1);
 5069 #endif
 5070   push(rax); // save rax,
 5071   // addr may contain rsp so we will have to adjust it based on the push
 5072   // we just did (and on 64 bit we do two pushes)
 5073   // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which
 5074   // stores rax into addr which is backwards of what was intended.
 5075   if (addr.uses(rsp)) {
 5076     lea(rax, addr);
 5077     pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
 5078   } else {
 5079     pushptr(addr);
 5080   }
 5081 
 5082   // Pass register number to verify_oop_subroutine
 5083   const char* b = nullptr;
 5084   {
 5085     ResourceMark rm;

 5532 
 5533 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
 5534   // get mirror
 5535   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 5536   load_method_holder(mirror, method);
 5537   movptr(mirror, Address(mirror, mirror_offset));
 5538   resolve_oop_handle(mirror, tmp);
 5539 }
 5540 
 5541 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
 5542   load_method_holder(rresult, rmethod);
 5543   movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
 5544 }
 5545 
 5546 void MacroAssembler::load_method_holder(Register holder, Register method) {
 5547   movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
 5548   movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
 5549   movptr(holder, Address(holder, ConstantPool::pool_holder_offset()));          // InstanceKlass*
 5550 }
 5551 
 5552 void MacroAssembler::load_metadata(Register dst, Register src) {
 5553   if (UseCompressedClassPointers) {
 5554     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5555   } else {
 5556     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5557   }
 5558 }
 5559 
 5560 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
 5561   assert_different_registers(src, tmp);
 5562   assert_different_registers(dst, tmp);
 5563 #ifdef _LP64
 5564   if (UseCompressedClassPointers) {
 5565     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5566     decode_klass_not_null(dst, tmp);
 5567   } else
 5568 #endif
 5569   movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5570 }
 5571 
 5572 void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) {
 5573   load_klass(dst, src, tmp);
 5574   movptr(dst, Address(dst, Klass::prototype_header_offset()));
 5575 }
 5576 
 5577 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
 5578   assert_different_registers(src, tmp);
 5579   assert_different_registers(dst, tmp);
 5580 #ifdef _LP64
 5581   if (UseCompressedClassPointers) {
 5582     encode_klass_not_null(src, tmp);
 5583     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5584   } else
 5585 #endif
 5586     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5587 }
 5588 
 5589 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
 5590                                     Register tmp1, Register thread_tmp) {
 5591   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5592   decorators = AccessInternal::decorator_fixup(decorators, type);
 5593   bool as_raw = (decorators & AS_RAW) != 0;
 5594   if (as_raw) {
 5595     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
 5596   } else {
 5597     bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
 5598   }
 5599 }
 5600 
 5601 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
 5602                                      Register tmp1, Register tmp2, Register tmp3) {
 5603   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5604   decorators = AccessInternal::decorator_fixup(decorators, type);
 5605   bool as_raw = (decorators & AS_RAW) != 0;
 5606   if (as_raw) {
 5607     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5608   } else {
 5609     bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5610   }
 5611 }
 5612 
 5613 void MacroAssembler::access_value_copy(DecoratorSet decorators, Register src, Register dst,
 5614                                        Register inline_klass) {
 5615   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5616   bs->value_copy(this, decorators, src, dst, inline_klass);
 5617 }
 5618 
 5619 void MacroAssembler::first_field_offset(Register inline_klass, Register offset) {
 5620   movptr(offset, Address(inline_klass, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 5621   movl(offset, Address(offset, InlineKlass::first_field_offset_offset()));
 5622 }
 5623 
 5624 void MacroAssembler::data_for_oop(Register oop, Register data, Register inline_klass) {
 5625   // ((address) (void*) o) + vk->first_field_offset();
 5626   Register offset = (data == oop) ? rscratch1 : data;
 5627   first_field_offset(inline_klass, offset);
 5628   if (data == oop) {
 5629     addptr(data, offset);
 5630   } else {
 5631     lea(data, Address(oop, offset));
 5632   }
 5633 }
 5634 
 5635 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass,
 5636                                                 Register index, Register data) {
 5637   assert(index != rcx, "index needs to shift by rcx");
 5638   assert_different_registers(array, array_klass, index);
 5639   assert_different_registers(rcx, array, index);
 5640 
 5641   // array->base() + (index << Klass::layout_helper_log2_element_size(lh));
 5642   movl(rcx, Address(array_klass, Klass::layout_helper_offset()));
 5643 
 5644   // Klass::layout_helper_log2_element_size(lh)
 5645   // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
 5646   shrl(rcx, Klass::_lh_log2_element_size_shift);
 5647   andl(rcx, Klass::_lh_log2_element_size_mask);
 5648   shlptr(index); // index << rcx
 5649 
 5650   lea(data, Address(array, index, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT)));
 5651 }
 5652 
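The address math in data_for_value_array_index, in scalar form: decode log2(element size) from the layout helper, scale the index by it, and add the array base offset. A sketch, with the shift/mask/base parameters standing for the Klass::_lh_* constants:

    #include <cstddef>

    static char* value_array_elem_sketch(char* array, size_t index,
                                         int layout_helper, int lh_shift,
                                         int lh_mask, int base_offset) {
      int log2_elem_size = (layout_helper >> lh_shift) & lh_mask;  // shrl + andl
      return array + (index << log2_elem_size) + base_offset;      // shlptr + lea
    }
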
 5653 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
 5654                                    Register thread_tmp, DecoratorSet decorators) {
 5655   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
 5656 }
 5657 
 5658 // Doesn't do verification, generates fixed-size code
 5659 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
 5660                                             Register thread_tmp, DecoratorSet decorators) {
 5661   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
 5662 }
 5663 
 5664 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
 5665                                     Register tmp2, Register tmp3, DecoratorSet decorators) {
 5666   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
 5667 }
 5668 
 5669 // Used for storing nulls.
 5670 void MacroAssembler::store_heap_oop_null(Address dst) {
 5671   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
 5672 }

 5972 
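      // Reloads r12_heapbase, the register caching the compressed oops base.
      // With a zero base the register is simply cleared; before the heap
      // exists, the base is loaded indirectly from CompressedOops::ptrs_base_addr().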
 5973 void MacroAssembler::reinit_heapbase() {
 5974   if (UseCompressedOops) {
 5975     if (Universe::heap() != nullptr) {
 5976       if (CompressedOops::base() == nullptr) {
 5977         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
 5978       } else {
 5979         mov64(r12_heapbase, (int64_t)CompressedOops::ptrs_base());
 5980       }
 5981     } else {
 5982       movptr(r12_heapbase, ExternalAddress(CompressedOops::ptrs_base_addr()));
 5983     }
 5984   }
 5985 }
 5986 
 5987 #endif // _LP64
 5988 
 5989 #if COMPILER2_OR_JVMCI
 5990 
 5991 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers
 5992 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, KRegister mask) {
 5993   // cnt - number of qwords (8-byte words).
 5994   // base - start address, qword aligned.
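        // Algorithm sketch: broadcast 'val' into xtmp, store it in 64-byte
        // strides, then clear the remaining tail (fewer than 64 bytes) with a
        // masked store (AVX-512), 32/16-byte vector stores, or an 8-byte loop.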
 5995   Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
 5996   bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
 5997   if (use64byteVector) {
 5998     evpbroadcastq(xtmp, val, AVX_512bit);
 5999   } else if (MaxVectorSize >= 32) {
 6000     movdq(xtmp, val);
 6001     punpcklqdq(xtmp, xtmp);
 6002     vinserti128_high(xtmp, xtmp);
 6003   } else {
 6004     movdq(xtmp, val);
 6005     punpcklqdq(xtmp, xtmp);
 6006   }
 6007   jmp(L_zero_64_bytes);
 6008 
 6009   BIND(L_loop);
 6010   if (MaxVectorSize >= 32) {
 6011     fill64(base, 0, xtmp, use64byteVector);
 6012   } else {
 6013     movdqu(Address(base,  0), xtmp);
 6014     movdqu(Address(base, 16), xtmp);
 6015     movdqu(Address(base, 32), xtmp);
 6016     movdqu(Address(base, 48), xtmp);
 6017   }
 6018   addptr(base, 64);
 6019 
 6020   BIND(L_zero_64_bytes);
 6021   subptr(cnt, 8);
 6022   jccb(Assembler::greaterEqual, L_loop);
 6023 
 6025   // Clear trailing bytes (fewer than 64)
 6025   if (use64byteVector) {
 6026     addptr(cnt, 8);
 6027     jccb(Assembler::equal, L_end);
 6028     fill64_masked(3, base, 0, xtmp, mask, cnt, val, true);
 6029     jmp(L_end);
 6030   } else {
 6031     addptr(cnt, 4);
 6032     jccb(Assembler::less, L_tail);
 6033     if (MaxVectorSize >= 32) {
 6034       vmovdqu(Address(base, 0), xtmp);
 6035     } else {
 6036       movdqu(Address(base,  0), xtmp);
 6037       movdqu(Address(base, 16), xtmp);
 6038     }
 6039   }
 6040   addptr(base, 32);
 6041   subptr(cnt, 4);
 6042 
 6043   BIND(L_tail);
 6044   addptr(cnt, 4);
 6045   jccb(Assembler::lessEqual, L_end);
 6046   if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
 6047     fill32_masked(3, base, 0, xtmp, mask, cnt, val);
 6048   } else {
 6049     decrement(cnt);
 6050 
 6051     BIND(L_sloop);
 6052     movq(Address(base, 0), xtmp);
 6053     addptr(base, 8);
 6054     decrement(cnt);
 6055     jccb(Assembler::greaterEqual, L_sloop);
 6056   }
 6057   BIND(L_end);
 6058 }
 6059 
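      // Packs inline type fields that were returned in registers into a heap
      // buffer. On entry, rax holds either a buffered oop (low bit clear, in
      // which case there is nothing to do) or the InlineKlass* of the return
      // type tagged with 1. Returns the offset of the slow-path runtime call
      // when called from compiled code, or -1 when called from the interpreter.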
 6060 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
 6061   assert(InlineTypeReturnedAsFields, "Inline types should be returned as fields");
 6062   // An inline type might be returned. If fields are in registers we
 6063   // need to allocate an inline type instance and initialize it with
 6064   // the values of the fields.
 6065   Label skip;
 6066   // We only need a new buffered inline type if one was not returned
 6067   testptr(rax, 1);
 6068   jcc(Assembler::zero, skip);
 6069   int call_offset = -1;
 6070 
 6071 #ifdef _LP64
 6072   // The following code is similar to allocate_instance but has some slight differences,
 6073   // e.g. the object size is never zero and is sometimes a compile-time constant, and storing
 6074   // the klass pointer after allocation is unnecessary if vk != nullptr. allocate_instance is not aware of these.
 6075   Label slow_case;
 6076   // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
 6077   mov(rscratch1, rax); // save rax for slow_case since *_allocate may corrupt it when allocation fails
 6078   if (vk != nullptr) {
 6079     // Called from C1, where the return type is statically known.
 6080     movptr(rbx, (intptr_t)vk->get_InlineKlass());
 6081     jint obj_size = vk->layout_helper();
 6082     assert(obj_size != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
 6083     if (UseTLAB) {
 6084       tlab_allocate(r15_thread, rax, noreg, obj_size, r13, r14, slow_case);
 6085     } else {
 6086       jmp(slow_case);
 6087     }
 6088   } else {
 6089     // Call from interpreter. RAX contains ((the InlineKlass* of the return type) | 0x01)
 6090     mov(rbx, rax);
 6091     andptr(rbx, -2);
 6092     movl(r14, Address(rbx, Klass::layout_helper_offset()));
 6093     if (UseTLAB) {
 6094       tlab_allocate(r15_thread, rax, r14, 0, r13, r14, slow_case);
 6095     } else {
 6096       jmp(slow_case);
 6097     }
 6098   }
 6099   if (UseTLAB) {
 6100     // 2. Initialize buffered inline instance header
 6101     Register buffer_obj = rax;
 6102     movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), (intptr_t)markWord::inline_type_prototype().value());
 6103     xorl(r13, r13);
 6104     store_klass_gap(buffer_obj, r13);
 6105     if (vk == nullptr) {
 6106       // store_klass corrupts rbx(klass), so save it in r13 for later use (interpreter case only).
 6107       mov(r13, rbx);
 6108     }
 6109     store_klass(buffer_obj, rbx, rscratch1);
 6110     // 3. Initialize its fields with an inline class specific handler
 6111     if (vk != nullptr) {
 6112       call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
 6113     } else {
 6114       movptr(rbx, Address(r13, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 6115       movptr(rbx, Address(rbx, InlineKlass::pack_handler_offset()));
 6116       call(rbx);
 6117     }
 6118     jmp(skip);
 6119   }
 6120   bind(slow_case);
 6121   // We failed to allocate a new inline type, so fall back to a runtime
 6122   // call. Some oop fields may be live in registers but we can't
 6123   // tell. The runtime call will take care of preserving them
 6124   // across a GC if there is one.
 6125   mov(rax, rscratch1);
 6126 #endif
 6127 
 6128   if (from_interpreter) {
 6129     super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
 6130   } else {
 6131     call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
 6132     call_offset = offset();
 6133   }
 6134 
 6135   bind(skip);
 6136   return call_offset;
 6137 }
 6138 
 6139 // Move a value between registers/stack slots and update the reg_state
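      // reg_state values: reg_readonly (the register still holds an unread
      // source value and must not be overwritten yet), reg_writable (free to
      // use) and reg_written (already holds its final value). Returning false
      // means the caller should retry once the destination becomes writable.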
 6140 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
 6141   assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
 6142   if (reg_state[to->value()] == reg_written) {
 6143     return true; // Already written
 6144   }
 6145   if (from != to && bt != T_VOID) {
 6146     if (reg_state[to->value()] == reg_readonly) {
 6147       return false; // Not yet writable
 6148     }
 6149     if (from->is_reg()) {
 6150       if (to->is_reg()) {
 6151         if (from->is_XMMRegister()) {
 6152           if (bt == T_DOUBLE) {
 6153             movdbl(to->as_XMMRegister(), from->as_XMMRegister());
 6154           } else {
 6155             assert(bt == T_FLOAT, "must be float");
 6156             movflt(to->as_XMMRegister(), from->as_XMMRegister());
 6157           }
 6158         } else {
 6159           movq(to->as_Register(), from->as_Register());
 6160         }
 6161       } else {
 6162         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6163         Address to_addr = Address(rsp, st_off);
 6164         if (from->is_XMMRegister()) {
 6165           if (bt == T_DOUBLE) {
 6166             movdbl(to_addr, from->as_XMMRegister());
 6167           } else {
 6168             assert(bt == T_FLOAT, "must be float");
 6169             movflt(to_addr, from->as_XMMRegister());
 6170           }
 6171         } else {
 6172           movq(to_addr, from->as_Register());
 6173         }
 6174       }
 6175     } else {
 6176       Address from_addr = Address(rsp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize);
 6177       if (to->is_reg()) {
 6178         if (to->is_XMMRegister()) {
 6179           if (bt == T_DOUBLE) {
 6180             movdbl(to->as_XMMRegister(), from_addr);
 6181           } else {
 6182             assert(bt == T_FLOAT, "must be float");
 6183             movflt(to->as_XMMRegister(), from_addr);
 6184           }
 6185         } else {
 6186           movq(to->as_Register(), from_addr);
 6187         }
 6188       } else {
 6189         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6190         movq(r13, from_addr);
 6191         movq(Address(rsp, st_off), r13);
 6192       }
 6193     }
 6194   }
 6195   // Update register states
 6196   reg_state[from->value()] = reg_writable;
 6197   reg_state[to->value()] = reg_written;
 6198   return true;
 6199 }
 6200 
 6201 // Calculate the extra stack space required for packing or unpacking inline
 6202 // args and adjust the stack pointer
 6203 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
 6204   // Two additional slots to account for the return address
 6205   int sp_inc = (args_on_stack + 2) * VMRegImpl::stack_slot_size;
 6206   sp_inc = align_up(sp_inc, StackAlignmentInBytes);
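        // For example, assuming 4-byte stack slots and 16-byte stack alignment,
        // args_on_stack == 5 gives sp_inc == align_up((5 + 2) * 4, 16) == 32.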
 6207   // Save the return address, adjust the stack (make sure it is properly
 6208   // 16-byte aligned) and copy the return address to the new top of the stack.
 6209   // The stack will be repaired on return (see MacroAssembler::remove_frame).
 6210   assert(sp_inc > 0, "sanity");
 6211   pop(r13);
 6212   subptr(rsp, sp_inc);
 6213   push(r13);
 6214   return sp_inc;
 6215 }
 6216 
 6217 // Read all fields from an inline type buffer and store the field values in registers/stack slots.
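      // Destinations that are still read-only are skipped; the caller must call
      // again until the return value ('done') is true. A field offset of -1
      // marks the IsInit value of a nullable argument: 1 is stored when the
      // argument is non-null, while on the null path IsInit and all oop fields
      // are zeroed.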
 6218 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
 6219                                           VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
 6220                                           RegState reg_state[]) {
 6221   assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
 6222   assert(from->is_valid(), "source must be valid");
 6223   bool progress = false;
 6224 #ifdef ASSERT
 6225   const int start_offset = offset();
 6226 #endif
 6227 
 6228   Label L_null, L_notNull;
 6229   // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
 6230   Register tmp1 = r10;
 6231   Register tmp2 = r13;
 6232   Register fromReg = noreg;
 6233   ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, -1);
 6234   bool done = true;
 6235   bool mark_done = true;
 6236   VMReg toReg;
 6237   BasicType bt;
 6238   // Check if argument requires a null check
 6239   bool null_check = false;
 6240   VMReg nullCheckReg;
 6241   while (stream.next(nullCheckReg, bt)) {
 6242     if (sig->at(stream.sig_index())._offset == -1) {
 6243       null_check = true;
 6244       break;
 6245     }
 6246   }
 6247   stream.reset(sig_index, to_index);
 6248   while (stream.next(toReg, bt)) {
 6249     assert(toReg->is_valid(), "destination must be valid");
 6250     int idx = (int)toReg->value();
 6251     if (reg_state[idx] == reg_readonly) {
 6252       if (idx != from->value()) {
 6253         mark_done = false;
 6254       }
 6255       done = false;
 6256       continue;
 6257     } else if (reg_state[idx] == reg_written) {
 6258       continue;
 6259     }
 6260     assert(reg_state[idx] == reg_writable, "must be writable");
 6261     reg_state[idx] = reg_written;
 6262     progress = true;
 6263 
 6264     if (fromReg == noreg) {
 6265       if (from->is_reg()) {
 6266         fromReg = from->as_Register();
 6267       } else {
 6268         int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6269         movq(tmp1, Address(rsp, st_off));
 6270         fromReg = tmp1;
 6271       }
 6272       if (null_check) {
 6273         // Nullable inline type argument, emit null check
 6274         testptr(fromReg, fromReg);
 6275         jcc(Assembler::zero, L_null);
 6276       }
 6277     }
 6278     int off = sig->at(stream.sig_index())._offset;
 6279     if (off == -1) {
 6280       assert(null_check, "Missing null check");
 6281       if (toReg->is_stack()) {
 6282         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6283         movq(Address(rsp, st_off), 1);
 6284       } else {
 6285         movq(toReg->as_Register(), 1);
 6286       }
 6287       continue;
 6288     }
 6289     assert(off > 0, "offset in object should be positive");
 6290     Address fromAddr = Address(fromReg, off);
 6291     if (!toReg->is_XMMRegister()) {
 6292       Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
 6293       if (is_reference_type(bt)) {
 6294         load_heap_oop(dst, fromAddr);
 6295       } else {
 6296         bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
 6297         load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
 6298       }
 6299       if (toReg->is_stack()) {
 6300         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6301         movq(Address(rsp, st_off), dst);
 6302       }
 6303     } else if (bt == T_DOUBLE) {
 6304       movdbl(toReg->as_XMMRegister(), fromAddr);
 6305     } else {
 6306       assert(bt == T_FLOAT, "must be float");
 6307       movflt(toReg->as_XMMRegister(), fromAddr);
 6308     }
 6309   }
 6310   if (progress && null_check) {
 6311     if (done) {
 6312       jmp(L_notNull);
 6313       bind(L_null);
 6314       // Set IsInit field to zero to signal that the argument is null.
 6315       // Also set all oop fields to zero to make the GC happy.
 6316       stream.reset(sig_index, to_index);
 6317       while (stream.next(toReg, bt)) {
 6318         if (sig->at(stream.sig_index())._offset == -1 ||
 6319             bt == T_OBJECT || bt == T_ARRAY) {
 6320           if (toReg->is_stack()) {
 6321             int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6322             movq(Address(rsp, st_off), 0);
 6323           } else {
 6324             xorq(toReg->as_Register(), toReg->as_Register());
 6325           }
 6326         }
 6327       }
 6328       bind(L_notNull);
 6329     } else {
 6330       bind(L_null);
 6331     }
 6332   }
 6333 
 6334   sig_index = stream.sig_index();
 6335   to_index = stream.regs_index();
 6336 
 6337   if (mark_done && reg_state[from->value()] != reg_written) {
 6338     // This is okay because no one else will write to that slot
 6339     reg_state[from->value()] = reg_writable;
 6340   }
 6341   from_index--;
 6342   assert(progress || (start_offset == offset()), "should not emit code");
 6343   return done;
 6344 }
 6345 
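      // Inverse of unpack_inline_helper: stores the scalarized field values of
      // one inline type argument into the pre-allocated buffer loaded from
      // 'val_array'. A field offset of -1 marks the IsInit value; if it is
      // zero, the argument is null and val_obj is set to the null pointer.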
 6346 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
 6347                                         VMRegPair* from, int from_count, int& from_index, VMReg to,
 6348                                         RegState reg_state[], Register val_array) {
 6349   assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter");
 6350   assert(to->is_valid(), "destination must be valid");
 6351 
 6352   if (reg_state[to->value()] == reg_written) {
 6353     skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
 6354     return true; // Already written
 6355   }
 6356 
 6357   // TODO 8284443 Isn't it an issue if the code below uses r14 as a tmp while it contains a spilled value?
 6358   // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
 6359   Register val_obj_tmp = r11;
 6360   Register from_reg_tmp = r14;
 6361   Register tmp1 = r10;
 6362   Register tmp2 = r13;
 6363   Register tmp3 = rbx;
 6364   Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();
 6365 
 6366   assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);
 6367 
 6368   if (reg_state[to->value()] == reg_readonly) {
 6369     if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
 6370       skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
 6371       return false; // Not yet writable
 6372     }
 6373     val_obj = val_obj_tmp;
 6374   }
 6375 
 6376   int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT);
 6377   load_heap_oop(val_obj, Address(val_array, index));
 6378 
 6379   ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
 6380   VMReg fromReg;
 6381   BasicType bt;
 6382   Label L_null;
 6383   while (stream.next(fromReg, bt)) {
 6384     assert(fromReg->is_valid(), "source must be valid");
 6385     reg_state[fromReg->value()] = reg_writable;
 6386 
 6387     int off = sig->at(stream.sig_index())._offset;
 6388     if (off == -1) {
 6389       // Nullable inline type argument, emit null check
 6390       Label L_notNull;
 6391       if (fromReg->is_stack()) {
 6392         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6393         testb(Address(rsp, ld_off), 1);
 6394       } else {
 6395         testb(fromReg->as_Register(), 1);
 6396       }
 6397       jcc(Assembler::notZero, L_notNull);
 6398       movptr(val_obj, 0);
 6399       jmp(L_null);
 6400       bind(L_notNull);
 6401       continue;
 6402     }
 6403 
 6404     assert(off > 0, "offset in object should be positive");
 6405     size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
 6406 
 6407     Address dst(val_obj, off);
 6408     if (!fromReg->is_XMMRegister()) {
 6409       Register src;
 6410       if (fromReg->is_stack()) {
 6411         src = from_reg_tmp;
 6412         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6413         load_sized_value(src, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
 6414       } else {
 6415         src = fromReg->as_Register();
 6416       }
 6417       assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
 6418       if (is_reference_type(bt)) {
 6419         store_heap_oop(dst, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
 6420       } else {
 6421         store_sized_value(dst, src, size_in_bytes);
 6422       }
 6423     } else if (bt == T_DOUBLE) {
 6424       movdbl(dst, fromReg->as_XMMRegister());
 6425     } else {
 6426       assert(bt == T_FLOAT, "must be float");
 6427       movflt(dst, fromReg->as_XMMRegister());
 6428     }
 6429   }
 6430   bind(L_null);
 6431   sig_index = stream.sig_index();
 6432   from_index = stream.regs_index();
 6433 
 6434   assert(reg_state[to->value()] == reg_writable, "must have already been read");
 6435   bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
 6436   assert(success, "to register must be writable");
 6437   return true;
 6438 }
 6439 
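      // Register used to temporarily hold a spilled value during argument
      // shuffling: xmm8 for floating point values, r14 otherwise. This is why
      // r14 must not be used as a temp by the helpers above.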
 6440 VMReg MacroAssembler::spill_reg_for(VMReg reg) {
 6441   return reg->is_XMMRegister() ? xmm8->as_VMReg() : r14->as_VMReg();
 6442 }
 6443 
 6444 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
 6445   assert((initial_framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
 6446   if (needs_stack_repair) {
 6447     movq(rbp, Address(rsp, initial_framesize));
 6448     // The stack increment resides just below the saved rbp
 6449     addq(rsp, Address(rsp, initial_framesize - wordSize));
 6450   } else {
 6451     if (initial_framesize > 0) {
 6452       addq(rsp, initial_framesize);
 6453     }
 6454     pop(rbp);
 6455   }
 6456 }
 6457 
 6458 // Clearing constant-sized memory using YMM/ZMM registers.
 6459 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 6460   assert(UseAVX > 2 && VM_Version::supports_avx512vlbw(), "requires AVX-512 BW/VL");
 6461   bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
 6462 
 6463   int vector64_count = (cnt & (~0x7)) >> 3;
 6464   cnt = cnt & 0x7;
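        // For example, cnt == 19 qwords splits into vector64_count == 2 full
        // 64-byte stores plus a 3-qword tail handled below.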
 6465   const int fill64_per_loop = 4;
 6466   const int max_unrolled_fill64 = 8;
 6467 
 6468   // 64 byte initialization loop.
 6469   vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit);
 6470   int start64 = 0;
 6471   if (vector64_count > max_unrolled_fill64) {
 6472     Label LOOP;
 6473     Register index = rtmp;
 6474 
 6475     start64 = vector64_count - (vector64_count % fill64_per_loop);
 6476 
 6477     movl(index, 0);

 6527         break;
 6528       case 7:
 6529         if (use64byteVector) {
 6530           movl(rtmp, 0x7F);
 6531           kmovwl(mask, rtmp);
 6532           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 6533         } else {
 6534           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 6535           movl(rtmp, 0x7);
 6536           kmovwl(mask, rtmp);
 6537           evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
 6538         }
 6539         break;
 6540       default:
 6541         fatal("Unexpected length: %d\n", cnt);
 6542         break;
 6543     }
 6544   }
 6545 }
 6546 
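      // Dispatches on the count: up to InitArrayShortSize/BytesPerLong qwords
      // are cleared with an inlined store loop; larger (or statically known
      // large) counts use rep stosb, XMM stores, or rep stos depending on the
      // VM flags. word_copy_only forbids the byte-granular rep stosb path.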
 6547 void MacroAssembler::clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp,
 6548                                bool is_large, bool word_copy_only, KRegister mask) {
 6549   // cnt      - number of qwords (8-byte words).
 6550   // base     - start address, qword aligned.
 6551   // is_large - true if the optimizer knows cnt is larger than InitArrayShortSize
 6552   assert(base==rdi, "base register must be edi for rep stos");
 6553   assert(val==rax,   "val register must be eax for rep stos");
 6554   assert(cnt==rcx,   "cnt register must be ecx for rep stos");
 6555   assert(InitArrayShortSize % BytesPerLong == 0,
 6556     "InitArrayShortSize should be a multiple of BytesPerLong");
 6557 
 6558   Label DONE;



 6559 
 6560   if (!is_large) {
 6561     Label LOOP, LONG;
 6562     cmpptr(cnt, InitArrayShortSize/BytesPerLong);
 6563     jccb(Assembler::greater, LONG);
 6564 
 6565     NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
 6566 
 6567     decrement(cnt);
 6568     jccb(Assembler::negative, DONE); // Zero length
 6569 
 6570     // Use individual pointer-sized stores for small counts:
 6571     BIND(LOOP);
 6572     movptr(Address(base, cnt, Address::times_ptr), val);
 6573     decrement(cnt);
 6574     jccb(Assembler::greaterEqual, LOOP);
 6575     jmpb(DONE);
 6576 
 6577     BIND(LONG);
 6578   }
 6579 
 6580   // Use longer rep-prefixed ops for non-small counts:
 6581   if (UseFastStosb && !word_copy_only) {
 6582     shlptr(cnt, 3); // convert to number of bytes
 6583     rep_stosb();
 6584   } else if (UseXMMForObjInit) {
 6585     xmm_clear_mem(base, cnt, val, xtmp, mask);
 6586   } else {
 6587     NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
 6588     rep_stos();
 6589   }
 6590 
 6591   BIND(DONE);
 6592 }
 6593 
 6594 #endif //COMPILER2_OR_JVMCI
 6595 
 6596 
 6597 void MacroAssembler::generate_fill(BasicType t, bool aligned,
 6598                                    Register to, Register value, Register count,
 6599                                    Register rtmp, XMMRegister xtmp) {
 6600   ShortBranchVerifier sbv(this);
 6601   assert_different_registers(to, value, count, rtmp);
 6602   Label L_exit;
 6603   Label L_fill_2_bytes, L_fill_4_bytes;
 6604 
 6605 #if defined(COMPILER2) && defined(_LP64)