src/hotspot/cpu/x86/macroAssembler_x86.cpp (original version)

   11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   12  * version 2 for more details (a copy is included in the LICENSE file that
   13  * accompanied this code).
   14  *
   15  * You should have received a copy of the GNU General Public License version
   16  * 2 along with this work; if not, write to the Free Software Foundation,
   17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   18  *
   19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   20  * or visit www.oracle.com if you need additional information or have any
   21  * questions.
   22  *
   23  */
   24 
   25 #include "asm/assembler.hpp"
   26 #include "asm/assembler.inline.hpp"
   27 #include "code/aotCodeCache.hpp"
   28 #include "code/compiledIC.hpp"
   29 #include "compiler/compiler_globals.hpp"
   30 #include "compiler/disassembler.hpp"

   31 #include "crc32c.h"
   32 #include "gc/shared/barrierSet.hpp"
   33 #include "gc/shared/barrierSetAssembler.hpp"
   34 #include "gc/shared/collectedHeap.inline.hpp"
   35 #include "gc/shared/tlab_globals.hpp"
   36 #include "interpreter/bytecodeHistogram.hpp"
   37 #include "interpreter/interpreter.hpp"
   38 #include "interpreter/interpreterRuntime.hpp"
   39 #include "jvm.h"
   40 #include "memory/resourceArea.hpp"
   41 #include "memory/universe.hpp"
   42 #include "oops/accessDecorators.hpp"
   43 #include "oops/compressedKlass.inline.hpp"
   44 #include "oops/compressedOops.inline.hpp"
   45 #include "oops/klass.inline.hpp"

   46 #include "prims/methodHandles.hpp"
   47 #include "runtime/continuation.hpp"
   48 #include "runtime/interfaceSupport.inline.hpp"
   49 #include "runtime/javaThread.hpp"
   50 #include "runtime/jniHandles.hpp"
   51 #include "runtime/objectMonitor.hpp"
   52 #include "runtime/os.hpp"
   53 #include "runtime/safepoint.hpp"
   54 #include "runtime/safepointMechanism.hpp"
   55 #include "runtime/sharedRuntime.hpp"

   56 #include "runtime/stubRoutines.hpp"
   57 #include "utilities/checkedCast.hpp"
   58 #include "utilities/macros.hpp"

   59 
   60 #ifdef PRODUCT
   61 #define BLOCK_COMMENT(str) /* nothing */
   62 #define STOP(error) stop(error)
   63 #else
   64 #define BLOCK_COMMENT(str) block_comment(str)
   65 #define STOP(error) block_comment(error); stop(error)
   66 #endif
   67 
   68 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
   69 
   70 #ifdef ASSERT
   71 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
   72 #endif
   73 
   74 static const Assembler::Condition reverse[] = {
   75     Assembler::noOverflow     /* overflow      = 0x0 */ ,
   76     Assembler::overflow       /* noOverflow    = 0x1 */ ,
   77     Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
   78     Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,

 1286 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1287   assert_different_registers(arg_0, c_rarg1, c_rarg2);
 1288   assert_different_registers(arg_1, c_rarg2);
 1289   pass_arg2(this, arg_2);
 1290   pass_arg1(this, arg_1);
 1291   pass_arg0(this, arg_0);
 1292   call_VM_leaf(entry_point, 3);
 1293 }
 1294 
 1295 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
 1296   assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
 1297   assert_different_registers(arg_1, c_rarg2, c_rarg3);
 1298   assert_different_registers(arg_2, c_rarg3);
 1299   pass_arg3(this, arg_3);
 1300   pass_arg2(this, arg_2);
 1301   pass_arg1(this, arg_1);
 1302   pass_arg0(this, arg_0);
 1303   call_VM_leaf(entry_point, 3);
 1304 }
 1305 
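The asserts in the two helpers above encode an ordering constraint: arguments are moved into the native calling-convention registers from last to first, so a source register must not alias a c_rarg* slot that a later pass_arg* call will overwrite. A hypothetical misuse they would reject (a sketch, not real call-site code):

// Sketch: arg_0 living in c_rarg1 would be clobbered by pass_arg1
// before pass_arg0 could copy it into c_rarg0.
//   call_VM_leaf(entry_point, /*arg_0=*/c_rarg1, /*arg_1=*/rax);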

 1306 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
 1307   pass_arg0(this, arg_0);
 1308   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1309 }
 1310 
 1311 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
 1312   assert_different_registers(arg_0, c_rarg1);
 1313   pass_arg1(this, arg_1);
 1314   pass_arg0(this, arg_0);
 1315   MacroAssembler::call_VM_leaf_base(entry_point, 2);
 1316 }
 1317 
 1318 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1319   assert_different_registers(arg_0, c_rarg1, c_rarg2);
 1320   assert_different_registers(arg_1, c_rarg2);
 1321   pass_arg2(this, arg_2);
 1322   pass_arg1(this, arg_1);
 1323   pass_arg0(this, arg_0);
 1324   MacroAssembler::call_VM_leaf_base(entry_point, 3);
 1325 }

 2339     lea(rscratch, src);
 2340     Assembler::mulss(dst, Address(rscratch, 0));
 2341   }
 2342 }
 2343 
 2344 void MacroAssembler::null_check(Register reg, int offset) {
 2345   if (needs_explicit_null_check(offset)) {
 2346     // provoke OS null exception if reg is null by
 2347     // accessing M[reg] w/o changing any (non-CC) registers
 2348     // NOTE: cmpl is plenty here to provoke a segv
 2349     cmpptr(rax, Address(reg, 0));
 2350     // Note: should probably use testl(rax, Address(reg, 0));
 2351     //       may be shorter code (however, this version of
 2352     //       testl needs to be implemented first)
 2353   } else {
 2354     // nothing to do, (later) access of M[reg + offset]
 2355     // will provoke OS null exception if reg is null
 2356   }
 2357 }
 2358 
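For context, a minimal sketch of the policy behind needs_explicit_null_check (an assumption for illustration, not the HotSpot implementation): an implicit hardware null check only works when a null base plus the access offset still lands inside the OS-protected page at address zero, so negative or large offsets require the explicit probe emitted above.

// Sketch (assumed guard-size parameter): offsets escaping the protected
// page cannot rely on the hardware trap and need an explicit check.
static bool needs_explicit_null_check_sketch(long offset, unsigned long guard_bytes) {
  return offset < 0 || (unsigned long)offset >= guard_bytes;
}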

 2359 void MacroAssembler::os_breakpoint() {
 2360   // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
 2361   // (e.g., MSVC can't call ps() otherwise)
 2362   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 2363 }
 2364 
 2365 void MacroAssembler::unimplemented(const char* what) {
 2366   const char* buf = nullptr;
 2367   {
 2368     ResourceMark rm;
 2369     stringStream ss;
 2370     ss.print("unimplemented: %s", what);
 2371     buf = code_string(ss.as_string());
 2372   }
 2373   stop(buf);
 2374 }
 2375 
 2376 #define XSTATE_BV 0x200
 2377 
 2378 void MacroAssembler::pop_CPU_state() {

 3421 }
 3422 
 3423 // C++ bool manipulation
 3424 void MacroAssembler::testbool(Register dst) {
 3425   if(sizeof(bool) == 1)
 3426     testb(dst, 0xff);
 3427   else if(sizeof(bool) == 2) {
 3428     // testw implementation needed for two byte bools
 3429     ShouldNotReachHere();
 3430   } else if(sizeof(bool) == 4)
 3431     testl(dst, dst);
 3432   else
 3433     // unsupported
 3434     ShouldNotReachHere();
 3435 }
 3436 
 3437 void MacroAssembler::testptr(Register dst, Register src) {
 3438   testq(dst, src);
 3439 }
 3440 
 3441 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
 3442 void MacroAssembler::tlab_allocate(Register obj,
 3443                                    Register var_size_in_bytes,
 3444                                    int con_size_in_bytes,
 3445                                    Register t1,
 3446                                    Register t2,
 3447                                    Label& slow_case) {
 3448   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 3449   bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
 3450 }
 3451 
 3452 RegSet MacroAssembler::call_clobbered_gp_registers() {
 3453   RegSet regs;
 3454   regs += RegSet::of(rax, rcx, rdx);
 3455 #ifndef _WINDOWS
 3456   regs += RegSet::of(rsi, rdi);
 3457 #endif
 3458   regs += RegSet::range(r8, r11);
 3459   if (UseAPX) {
 3460     regs += RegSet::range(r16, as_Register(Register::number_of_registers - 1));

 3624   xorptr(temp, temp);    // use zero reg to clear memory (shorter code)
 3625   if (UseIncDec) {
 3626     shrptr(index, 3);  // divide by 8/16 and set carry flag if bit 2 was set
 3627   } else {
 3628     shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
 3629     shrptr(index, 1);
 3630   }
 3631 
 3632   // initialize remaining object fields: index is a multiple of 2 now
 3633   {
 3634     Label loop;
 3635     bind(loop);
 3636     movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
 3637     decrement(index);
 3638     jcc(Assembler::notZero, loop);
 3639   }
 3640 
 3641   bind(done);
 3642 }
 3643 
 3644 // Look up the method for a megamorphic invokeinterface call.
 3645 // The target method is determined by <intf_klass, itable_index>.
 3646 // The receiver klass is in recv_klass.
 3647 // On success, the result will be in method_result, and execution falls through.
 3648 // On failure, execution transfers to the given label.
 3649 void MacroAssembler::lookup_interface_method(Register recv_klass,
 3650                                              Register intf_klass,
 3651                                              RegisterOrConstant itable_index,
 3652                                              Register method_result,
 3653                                              Register scan_temp,
 3654                                              Label& L_no_such_interface,
 3655                                              bool return_method) {
 3656   assert_different_registers(recv_klass, intf_klass, scan_temp);
 3657   assert_different_registers(method_result, intf_klass, scan_temp);
 3658   assert(recv_klass != method_result || !return_method,
 3659          "recv_klass can be destroyed when method isn't needed");
 3660 
 3661   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
 3662          "caller must use same register for non-constant itable index as for method");
 3663 

 4674   } else {
 4675     Label L;
 4676     jccb(negate_condition(cc), L);
 4677     movl(dst, src);
 4678     bind(L);
 4679   }
 4680 }
 4681 
 4682 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
 4683   if (VM_Version::supports_cmov()) {
 4684     cmovl(cc, dst, src);
 4685   } else {
 4686     Label L;
 4687     jccb(negate_condition(cc), L);
 4688     movl(dst, src);
 4689     bind(L);
 4690   }
 4691 }
 4692 
 4693 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
 4694   if (!VerifyOops) return;
 4695 
 4696   BLOCK_COMMENT("verify_oop {");
 4697   push(rscratch1);
 4698   push(rax);                          // save rax
 4699   push(reg);                          // pass register argument
 4700 
 4701   // Pass register number to verify_oop_subroutine
 4702   const char* b = nullptr;
 4703   {
 4704     ResourceMark rm;
 4705     stringStream ss;
 4706     ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
 4707     b = code_string(ss.as_string());
 4708   }
 4709   AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
 4710   pushptr(buffer.addr(), rscratch1);
 4711 
 4712   // call indirectly to solve generation ordering problem
 4713   movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
 4714   call(rax);

 4733   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
 4734   int stackElementSize = Interpreter::stackElementSize;
 4735   int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
 4736 #ifdef ASSERT
 4737   int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
 4738   assert(offset1 - offset == stackElementSize, "correct arithmetic");
 4739 #endif
 4740   Register             scale_reg    = noreg;
 4741   Address::ScaleFactor scale_factor = Address::no_scale;
 4742   if (arg_slot.is_constant()) {
 4743     offset += arg_slot.as_constant() * stackElementSize;
 4744   } else {
 4745     scale_reg    = arg_slot.as_register();
 4746     scale_factor = Address::times(stackElementSize);
 4747   }
 4748   offset += wordSize;           // return PC is on stack
 4749   return Address(rsp, scale_reg, scale_factor, offset);
 4750 }
 4751 
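As a worked example of the arithmetic above (assuming a 64-bit VM with stackElementSize == 8, and ignoring the expr_offset_in_bytes base term): a constant arg_slot of 2 yields the operand [rsp + 2*8 + 8], i.e. two expression-stack slots in, plus one word to skip the return PC.

// Sketch of the effective address the returned operand denotes.
static unsigned long argument_address_sketch(unsigned long rsp, long slot) {
  const int stackElementSize = 8;   // assumption: Interpreter::stackElementSize on x86_64
  const int wordSize = 8;           // return PC sits on the stack
  return rsp + slot * stackElementSize + wordSize;
}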
 4752 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
 4753   if (!VerifyOops) return;
 4754 
 4755   push(rscratch1);
 4756   push(rax); // save rax,
 4757   // addr may contain rsp so we will have to adjust it based on the push
 4758   // we just did (and on 64 bit we do two pushes)
 4759   // NOTE: the 64-bit code once had a bug here: it did movq(addr, rax), which
 4760   // stores rax into addr, the reverse of what was intended.
 4761   if (addr.uses(rsp)) {
 4762     lea(rax, addr);
 4763     pushptr(Address(rax, 2 * BytesPerWord));
 4764   } else {
 4765     pushptr(addr);
 4766   }
 4767 
 4768   // Pass register number to verify_oop_subroutine
 4769   const char* b = nullptr;
 4770   {
 4771     ResourceMark rm;
 4772     stringStream ss;
 4773     ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);

 5127 
 5128 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
 5129   // get mirror
 5130   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 5131   load_method_holder(mirror, method);
 5132   movptr(mirror, Address(mirror, mirror_offset));
 5133   resolve_oop_handle(mirror, tmp);
 5134 }
 5135 
 5136 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
 5137   load_method_holder(rresult, rmethod);
 5138   movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
 5139 }
 5140 
 5141 void MacroAssembler::load_method_holder(Register holder, Register method) {
 5142   movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
 5143   movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
 5144   movptr(holder, Address(holder, ConstantPool::pool_holder_offset()));          // InstanceKlass*
 5145 }
 5146 
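The three dependent loads above walk the metadata chain from a Method* to its holder klass; spelled out as a sketch (the field names below are placeholders implied by the offset comments, not real accessors):

// Conceptual chain (sketch):
//   ConstMethod*   cm = method->const_method;   // Method::const_offset()
//   ConstantPool*  cp = cm->constants;          // ConstMethod::constants_offset()
//   InstanceKlass* ik = cp->pool_holder;        // ConstantPool::pool_holder_offset()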

 5147 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
 5148   assert(UseCompactObjectHeaders, "expect compact object headers");
 5149   movq(dst, Address(src, oopDesc::mark_offset_in_bytes()));
 5150   shrq(dst, markWord::klass_shift);
 5151 }
 5152 
 5153 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
 5154   assert_different_registers(src, tmp);
 5155   assert_different_registers(dst, tmp);
 5156 
 5157   if (UseCompactObjectHeaders) {
 5158     load_narrow_klass_compact(dst, src);
 5159     decode_klass_not_null(dst, tmp);
 5160   } else if (UseCompressedClassPointers) {
 5161     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5162     decode_klass_not_null(dst, tmp);
 5163   } else {
 5164     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5165   }
 5166 }
 5167 
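With compact object headers the narrow Klass pointer is stored in the upper bits of the mark word, which is what the shrq above extracts. As a sketch (the concrete shift value is markWord::klass_shift and is not assumed here):

// Sketch: extract the narrow Klass* from a compact-headers mark word;
// the low bits that remain hold lock and hash state.
static unsigned int narrow_klass_from_mark_sketch(unsigned long long mark, unsigned shift) {
  return (unsigned int)(mark >> shift);
}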

 5168 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
 5169   assert(!UseCompactObjectHeaders, "not with compact headers");
 5170   assert_different_registers(src, tmp);
 5171   assert_different_registers(dst, tmp);
 5172   if (UseCompressedClassPointers) {
 5173     encode_klass_not_null(src, tmp);
 5174     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5175   } else {
 5176     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5177   }
 5178 }
 5179 
 5180 void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
 5181   if (UseCompactObjectHeaders) {
 5182     assert(tmp != noreg, "need tmp");
 5183     assert_different_registers(klass, obj, tmp);
 5184     load_narrow_klass_compact(tmp, obj);
 5185     cmpl(klass, tmp);
 5186   } else if (UseCompressedClassPointers) {
 5187     cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes()));

 5213   bool as_raw = (decorators & AS_RAW) != 0;
 5214   if (as_raw) {
 5215     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1);
 5216   } else {
 5217     bs->load_at(this, decorators, type, dst, src, tmp1);
 5218   }
 5219 }
 5220 
 5221 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
 5222                                      Register tmp1, Register tmp2, Register tmp3) {
 5223   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5224   decorators = AccessInternal::decorator_fixup(decorators, type);
 5225   bool as_raw = (decorators & AS_RAW) != 0;
 5226   if (as_raw) {
 5227     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5228   } else {
 5229     bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5230   }
 5231 }
 5232 
 5233 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
 5234   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1);
 5235 }
 5236 
 5237 // Doesn't do verification, generates fixed size code
 5238 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
 5239   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1);
 5240 }
 5241 
 5242 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
 5243                                     Register tmp2, Register tmp3, DecoratorSet decorators) {
 5244   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
 5245 }
 5246 
 5247 // Used for storing nulls.
 5248 void MacroAssembler::store_heap_oop_null(Address dst) {
 5249   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
 5250 }
 5251 
 5252 void MacroAssembler::store_klass_gap(Register dst, Register src) {

 5572   Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 5573 }
 5574 
 5575 void MacroAssembler::reinit_heapbase() {
 5576   if (UseCompressedOops) {
 5577     if (Universe::heap() != nullptr) {
 5578       if (CompressedOops::base() == nullptr) {
 5579         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
 5580       } else {
 5581         mov64(r12_heapbase, (int64_t)CompressedOops::base());
 5582       }
 5583     } else {
 5584       movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr()));
 5585     }
 5586   }
 5587 }
 5588 
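The r12_heapbase register initialized above serves as the base for compressed-oop decoding. Conceptually (a sketch; the shift is the oop-alignment shift when compressed oops are shifted, zero otherwise):

// Sketch: decode a 32-bit compressed oop against the heap base in r12.
static unsigned long decode_heap_oop_sketch(unsigned long base, unsigned int narrow, int shift) {
  return narrow == 0 ? 0 : base + ((unsigned long)narrow << shift);
}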
 5589 #if COMPILER2_OR_JVMCI
 5590 
 5591 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers
 5592 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 5593   // cnt - number of qwords (8-byte words).
 5594   // base - start address, qword aligned.
 5595   Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
 5596   bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
 5597   if (use64byteVector) {
 5598     vpxor(xtmp, xtmp, xtmp, AVX_512bit);
 5599   } else if (MaxVectorSize >= 32) {
 5600     vpxor(xtmp, xtmp, xtmp, AVX_256bit);
 5601   } else {
 5602     pxor(xtmp, xtmp);
 5603   }
 5604   jmp(L_zero_64_bytes);
 5605 
 5606   BIND(L_loop);
 5607   if (MaxVectorSize >= 32) {
 5608     fill64(base, 0, xtmp, use64byteVector);
 5609   } else {
 5610     movdqu(Address(base,  0), xtmp);
 5611     movdqu(Address(base, 16), xtmp);
 5612     movdqu(Address(base, 32), xtmp);
 5613     movdqu(Address(base, 48), xtmp);
 5614   }
 5615   addptr(base, 64);
 5616 
 5617   BIND(L_zero_64_bytes);
 5618   subptr(cnt, 8);
 5619   jccb(Assembler::greaterEqual, L_loop);
 5620 
 5621   // Copy trailing 64 bytes
 5622   if (use64byteVector) {
 5623     addptr(cnt, 8);
 5624     jccb(Assembler::equal, L_end);
 5625     fill64_masked(3, base, 0, xtmp, mask, cnt, rtmp, true);
 5626     jmp(L_end);
 5627   } else {
 5628     addptr(cnt, 4);
 5629     jccb(Assembler::less, L_tail);
 5630     if (MaxVectorSize >= 32) {
 5631       vmovdqu(Address(base, 0), xtmp);
 5632     } else {
 5633       movdqu(Address(base,  0), xtmp);
 5634       movdqu(Address(base, 16), xtmp);
 5635     }
 5636   }
 5637   addptr(base, 32);
 5638   subptr(cnt, 4);
 5639 
 5640   BIND(L_tail);
 5641   addptr(cnt, 4);
 5642   jccb(Assembler::lessEqual, L_end);
 5643   if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
 5644     fill32_masked(3, base, 0, xtmp, mask, cnt, rtmp);
 5645   } else {
 5646     decrement(cnt);
 5647 
 5648     BIND(L_sloop);
 5649     movq(Address(base, 0), xtmp);
 5650     addptr(base, 8);
 5651     decrement(cnt);
 5652     jccb(Assembler::greaterEqual, L_sloop);
 5653   }
 5654   BIND(L_end);
 5655 }
 5656 
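Reduced to scalar C++, the control flow above looks like the sketch below: a 64-byte main loop, with the masked vector tail replaced by plain qword stores (this mirrors the structure only, not the emitted instructions):

// Scalar sketch of xmm_clear_mem's loop structure.
static void clear_qwords_sketch(unsigned long long* base, long cnt) {  // cnt in qwords
  for (; cnt >= 8; cnt -= 8, base += 8) {
    for (int i = 0; i < 8; i++) base[i] = 0;   // 64-byte main loop
  }
  while (cnt-- > 0) *base++ = 0;               // trailing qwords
}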

 5657 // Clearing constant sized memory using YMM/ZMM registers.
 5658 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 5659   assert(UseAVX > 2 && VM_Version::supports_avx512vl(), "");
 5660   bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
 5661 
 5662   int vector64_count = (cnt & (~0x7)) >> 3;
 5663   cnt = cnt & 0x7;
 5664   const int fill64_per_loop = 4;
 5665   const int max_unrolled_fill64 = 8;
 5666 
 5667   // 64 byte initialization loop.
 5668   vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit);
 5669   int start64 = 0;
 5670   if (vector64_count > max_unrolled_fill64) {
 5671     Label LOOP;
 5672     Register index = rtmp;
 5673 
 5674     start64 = vector64_count - (vector64_count % fill64_per_loop);
 5675 
 5676     movl(index, 0);

 5726         break;
 5727       case 7:
 5728         if (use64byteVector) {
 5729           movl(rtmp, 0x7F);
 5730           kmovwl(mask, rtmp);
 5731           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 5732         } else {
 5733           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 5734           movl(rtmp, 0x7);
 5735           kmovwl(mask, rtmp);
 5736           evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
 5737         }
 5738         break;
 5739       default:
 5740         fatal("Unexpected length : %d\n",cnt);
 5741         break;
 5742     }
 5743   }
 5744 }
 5745 
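The 0x7F and 0x7 constants in the tail cases above follow one rule: for a remainder of r qwords, the k-mask has the low r bits set, one per 8-byte lane.

// Sketch: k-mask covering the first r qword lanes (r == 7 gives 0x7F).
static unsigned short tail_mask_sketch(int r) {
  return (unsigned short)((1 << r) - 1);
}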
 5746 void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp, XMMRegister xtmp,
 5747                                bool is_large, KRegister mask) {
 5748   // cnt      - number of qwords (8-byte words).
 5749   // base     - start address, qword aligned.
 5750   // is_large - if optimizers know cnt is larger than InitArrayShortSize
 5751   assert(base==rdi, "base register must be edi for rep stos");
 5752   assert(tmp==rax,   "tmp register must be eax for rep stos");
 5753   assert(cnt==rcx,   "cnt register must be ecx for rep stos");
 5754   assert(InitArrayShortSize % BytesPerLong == 0,
 5755     "InitArrayShortSize should be the multiple of BytesPerLong");
 5756 
 5757   Label DONE;
 5758   if (!is_large || !UseXMMForObjInit) {
 5759     xorptr(tmp, tmp);
 5760   }
 5761 
 5762   if (!is_large) {
 5763     Label LOOP, LONG;
 5764     cmpptr(cnt, InitArrayShortSize/BytesPerLong);
 5765     jccb(Assembler::greater, LONG);
 5766 
 5767     decrement(cnt);
 5768     jccb(Assembler::negative, DONE); // Zero length
 5769 
 5770     // Use individual pointer-sized stores for small counts:
 5771     BIND(LOOP);
 5772     movptr(Address(base, cnt, Address::times_ptr), tmp);
 5773     decrement(cnt);
 5774     jccb(Assembler::greaterEqual, LOOP);
 5775     jmpb(DONE);
 5776 
 5777     BIND(LONG);
 5778   }
 5779 
 5780   // Use longer rep-prefixed ops for non-small counts:
 5781   if (UseFastStosb) {
 5782     shlptr(cnt, 3); // convert to number of bytes
 5783     rep_stosb();
 5784   } else if (UseXMMForObjInit) {
 5785     xmm_clear_mem(base, cnt, tmp, xtmp, mask);
 5786   } else {
 5787     rep_stos();
 5788   }
 5789 
 5790   BIND(DONE);
 5791 }
 5792 
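In plain C++ the dispatch above amounts to the sketch below, where the 8-qword threshold stands in for InitArrayShortSize/BytesPerLong and memset stands in for the rep-stos and XMM paths:

#include <string.h>

// Sketch of clear_mem's strategy choice (not the emitted code).
static void clear_mem_sketch(unsigned long long* base, long cnt, bool is_large) {
  if (!is_large && cnt <= 8) {
    for (long i = 0; i < cnt; i++) base[i] = 0;   // short: inline store loop
  } else {
    memset(base, 0, (unsigned long)cnt * 8);      // long: rep stosb / XMM / rep stosq
  }
}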
 5793 #endif //COMPILER2_OR_JVMCI
 5794 
 5795 
 5796 void MacroAssembler::generate_fill(BasicType t, bool aligned,
 5797                                    Register to, Register value, Register count,
 5798                                    Register rtmp, XMMRegister xtmp) {
 5799   ShortBranchVerifier sbv(this);
 5800   assert_different_registers(to, value, count, rtmp);
 5801   Label L_exit;
 5802   Label L_fill_2_bytes, L_fill_4_bytes;
 5803 
 5804 #if defined(COMPILER2)
 5805   if(MaxVectorSize >=32 &&

 9685 
 9686   // Load top.
 9687   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
 9688 
 9689   // Check if the lock-stack is full.
 9690   cmpl(top, LockStack::end_offset());
 9691   jcc(Assembler::greaterEqual, slow);
 9692 
 9693   // Check for recursion.
 9694   cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
 9695   jcc(Assembler::equal, push);
 9696 
 9697   // Check header for monitor (0b10).
 9698   testptr(reg_rax, markWord::monitor_value);
 9699   jcc(Assembler::notZero, slow);
 9700 
 9701   // Try to lock. Transition lock bits 0b01 => 0b00
 9702   movptr(tmp, reg_rax);
 9703   andptr(tmp, ~(int32_t)markWord::unlocked_value);
 9704   orptr(reg_rax, markWord::unlocked_value);
 9705   lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
 9706   jcc(Assembler::notEqual, slow);
 9707 
 9708   // Restore top, CAS clobbers register.
 9709   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
 9710 
 9711   bind(push);
 9712   // After successful lock, push object on lock-stack.
 9713   movptr(Address(thread, top), obj);
 9714   incrementl(top, oopSize);
 9715   movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
 9716 }
 9717 
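The CAS above attempts the mark-word transition 0b01 (unlocked) to 0b00 (fast-locked); it fails if the word changed concurrently, and the earlier test already routed mark words with the monitor bit (0b10) to the slow path. In std::atomic terms (a sketch; 0x1 plays the role of markWord::unlocked_value):

#include <atomic>
#include <cstdint>

// Sketch: the lock-bit transition attempted by lightweight_lock's CAS.
static bool try_fast_lock_sketch(std::atomic<uintptr_t>* mark) {
  uintptr_t expected = mark->load() | 0x1;          // must currently be unlocked (0b01)
  uintptr_t desired  = expected & ~(uintptr_t)0x1;  // fast-locked (0b00)
  return mark->compare_exchange_strong(expected, desired);
}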
 9718 // Implements lightweight-unlocking.
 9719 //
 9720 // obj: the object to be unlocked
 9721 // reg_rax: rax
 9722 // thread: the thread
 9723 // tmp: a temporary register
 9724 void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register tmp, Label& slow) {

src/hotspot/cpu/x86/macroAssembler_x86.cpp (updated version)

   11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   12  * version 2 for more details (a copy is included in the LICENSE file that
   13  * accompanied this code).
   14  *
   15  * You should have received a copy of the GNU General Public License version
   16  * 2 along with this work; if not, write to the Free Software Foundation,
   17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   18  *
   19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   20  * or visit www.oracle.com if you need additional information or have any
   21  * questions.
   22  *
   23  */
   24 
   25 #include "asm/assembler.hpp"
   26 #include "asm/assembler.inline.hpp"
   27 #include "code/aotCodeCache.hpp"
   28 #include "code/compiledIC.hpp"
   29 #include "compiler/compiler_globals.hpp"
   30 #include "compiler/disassembler.hpp"
   31 #include "ci/ciInlineKlass.hpp"
   32 #include "crc32c.h"
   33 #include "gc/shared/barrierSet.hpp"
   34 #include "gc/shared/barrierSetAssembler.hpp"
   35 #include "gc/shared/collectedHeap.inline.hpp"
   36 #include "gc/shared/tlab_globals.hpp"
   37 #include "interpreter/bytecodeHistogram.hpp"
   38 #include "interpreter/interpreter.hpp"
   39 #include "interpreter/interpreterRuntime.hpp"
   40 #include "jvm.h"
   41 #include "memory/resourceArea.hpp"
   42 #include "memory/universe.hpp"
   43 #include "oops/accessDecorators.hpp"
   44 #include "oops/compressedKlass.inline.hpp"
   45 #include "oops/compressedOops.inline.hpp"
   46 #include "oops/klass.inline.hpp"
   47 #include "oops/resolvedFieldEntry.hpp"
   48 #include "prims/methodHandles.hpp"
   49 #include "runtime/continuation.hpp"
   50 #include "runtime/interfaceSupport.inline.hpp"
   51 #include "runtime/javaThread.hpp"
   52 #include "runtime/jniHandles.hpp"
   53 #include "runtime/objectMonitor.hpp"
   54 #include "runtime/os.hpp"
   55 #include "runtime/safepoint.hpp"
   56 #include "runtime/safepointMechanism.hpp"
   57 #include "runtime/sharedRuntime.hpp"
   58 #include "runtime/signature_cc.hpp"
   59 #include "runtime/stubRoutines.hpp"
   60 #include "utilities/checkedCast.hpp"
   61 #include "utilities/macros.hpp"
   62 #include "vmreg_x86.inline.hpp"
   63 #ifdef COMPILER2
   64 #include "opto/output.hpp"
   65 #endif
   66 
   67 #ifdef PRODUCT
   68 #define BLOCK_COMMENT(str) /* nothing */
   69 #define STOP(error) stop(error)
   70 #else
   71 #define BLOCK_COMMENT(str) block_comment(str)
   72 #define STOP(error) block_comment(error); stop(error)
   73 #endif
   74 
   75 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
   76 
   77 #ifdef ASSERT
   78 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
   79 #endif
   80 
   81 static const Assembler::Condition reverse[] = {
   82     Assembler::noOverflow     /* overflow      = 0x0 */ ,
   83     Assembler::overflow       /* noOverflow    = 0x1 */ ,
   84     Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
   85     Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,

 1293 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1294   assert_different_registers(arg_0, c_rarg1, c_rarg2);
 1295   assert_different_registers(arg_1, c_rarg2);
 1296   pass_arg2(this, arg_2);
 1297   pass_arg1(this, arg_1);
 1298   pass_arg0(this, arg_0);
 1299   call_VM_leaf(entry_point, 3);
 1300 }
 1301 
 1302 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
 1303   assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
 1304   assert_different_registers(arg_1, c_rarg2, c_rarg3);
 1305   assert_different_registers(arg_2, c_rarg3);
 1306   pass_arg3(this, arg_3);
 1307   pass_arg2(this, arg_2);
 1308   pass_arg1(this, arg_1);
 1309   pass_arg0(this, arg_0);
 1310   call_VM_leaf(entry_point, 3);
 1311 }
 1312 
 1313 void MacroAssembler::super_call_VM_leaf(address entry_point) {
 1314   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1315 }
 1316 
 1317 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
 1318   pass_arg0(this, arg_0);
 1319   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1320 }
 1321 
 1322 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
 1323   assert_different_registers(arg_0, c_rarg1);
 1324   pass_arg1(this, arg_1);
 1325   pass_arg0(this, arg_0);
 1326   MacroAssembler::call_VM_leaf_base(entry_point, 2);
 1327 }
 1328 
 1329 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1330   assert_different_registers(arg_0, c_rarg1, c_rarg2);
 1331   assert_different_registers(arg_1, c_rarg2);
 1332   pass_arg2(this, arg_2);
 1333   pass_arg1(this, arg_1);
 1334   pass_arg0(this, arg_0);
 1335   MacroAssembler::call_VM_leaf_base(entry_point, 3);
 1336 }

 2350     lea(rscratch, src);
 2351     Assembler::mulss(dst, Address(rscratch, 0));
 2352   }
 2353 }
 2354 
 2355 void MacroAssembler::null_check(Register reg, int offset) {
 2356   if (needs_explicit_null_check(offset)) {
 2357     // provoke OS null exception if reg is null by
 2358     // accessing M[reg] w/o changing any (non-CC) registers
 2359     // NOTE: cmpl is plenty here to provoke a segv
 2360     cmpptr(rax, Address(reg, 0));
 2361     // Note: should probably use testl(rax, Address(reg, 0));
 2362     //       may be shorter code (however, this version of
 2363     //       testl needs to be implemented first)
 2364   } else {
 2365     // nothing to do, (later) access of M[reg + offset]
 2366     // will provoke OS null exception if reg is null
 2367   }
 2368 }
 2369 
 2370 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
 2371   andptr(markword, markWord::inline_type_mask_in_place);
 2372   cmpptr(markword, markWord::inline_type_pattern);
 2373   jcc(Assembler::equal, is_inline_type);
 2374 }
 2375 
 2376 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type, bool can_be_null) {
 2377   if (can_be_null) {
 2378     testptr(object, object);
 2379     jcc(Assembler::zero, not_inline_type);
 2380   }
 2381   const int is_inline_type_mask = markWord::inline_type_pattern;
 2382   movptr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
 2383   andptr(tmp, is_inline_type_mask);
 2384   cmpptr(tmp, is_inline_type_mask);
 2385   jcc(Assembler::notEqual, not_inline_type);
 2386 }
 2387 
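Both tests above reduce to a single mask-and-compare on the mark word; the concrete constants are markWord::inline_type_mask_in_place and markWord::inline_type_pattern. As a sketch:

// Sketch: an oop is an inline type iff its masked mark bits match the pattern.
static bool is_inline_type_mark_sketch(unsigned long mark, unsigned long mask, unsigned long pattern) {
  return (mark & mask) == pattern;
}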
 2388 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) {
 2389   movl(temp_reg, flags);
 2390   testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
 2391   jcc(Assembler::notEqual, is_null_free_inline_type);
 2392 }
 2393 
 2394 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) {
 2395   movl(temp_reg, flags);
 2396   testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
 2397   jcc(Assembler::equal, not_null_free_inline_type);
 2398 }
 2399 
 2400 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) {
 2401   movl(temp_reg, flags);
 2402   testl(temp_reg, 1 << ResolvedFieldEntry::is_flat_shift);
 2403   jcc(Assembler::notEqual, is_flat);
 2404 }
 2405 
 2406 void MacroAssembler::test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker) {
 2407   movl(temp_reg, flags);
 2408   testl(temp_reg, 1 << ResolvedFieldEntry::has_null_marker_shift);
 2409   jcc(Assembler::notEqual, has_null_marker);
 2410 }
 2411 
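Each of the four predicates above tests one flag bit of a ResolvedFieldEntry by its shift; generically (a sketch):

// Sketch: test a single ResolvedFieldEntry flag bit.
static bool field_flag_set_sketch(unsigned int flags, int shift) {
  return (flags & (1u << shift)) != 0;
}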
 2412 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
 2413   Label test_mark_word;
 2414   // load mark word
 2415   movptr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
 2416   // check displaced
 2417   testl(temp_reg, markWord::unlocked_value);
 2418   jccb(Assembler::notZero, test_mark_word);
 2419   // slow path use klass prototype
 2420   push(rscratch1);
 2421   load_prototype_header(temp_reg, oop, rscratch1);
 2422   pop(rscratch1);
 2423 
 2424   bind(test_mark_word);
 2425   testl(temp_reg, test_bit);
 2426   jcc((jmp_set) ? Assembler::notZero : Assembler::zero, jmp_label);
 2427 }
 2428 
 2429 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg,
 2430                                          Label& is_flat_array) {
 2431 #ifdef _LP64
 2432   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array);
 2433 #else
 2434   load_klass(temp_reg, oop, noreg);
 2435   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 2436   test_flat_array_layout(temp_reg, is_flat_array);
 2437 #endif
 2438 }
 2439 
 2440 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg,
 2441                                              Label& is_non_flat_array) {
 2442 #ifdef _LP64
 2443   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array);
 2444 #else
 2445   load_klass(temp_reg, oop, noreg);
 2446   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 2447   test_non_flat_array_layout(temp_reg, is_non_flat_array);
 2448 #endif
 2449 }
 2450 
 2451 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label&is_null_free_array) {
 2452 #ifdef _LP64
 2453   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
 2454 #else
 2455   Unimplemented();
 2456 #endif
 2457 }
 2458 
 2459 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label&is_non_null_free_array) {
 2460 #ifdef _LP64
 2461   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
 2462 #else
 2463   Unimplemented();
 2464 #endif
 2465 }
 2466 
 2467 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) {
 2468   testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
 2469   jcc(Assembler::notZero, is_flat_array);
 2470 }
 2471 
 2472 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) {
 2473   testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
 2474   jcc(Assembler::zero, is_non_flat_array);
 2475 }
 2476 
 2477 void MacroAssembler::os_breakpoint() {
 2478   // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
 2479   // (e.g., MSVC can't call ps() otherwise)
 2480   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 2481 }
 2482 
 2483 void MacroAssembler::unimplemented(const char* what) {
 2484   const char* buf = nullptr;
 2485   {
 2486     ResourceMark rm;
 2487     stringStream ss;
 2488     ss.print("unimplemented: %s", what);
 2489     buf = code_string(ss.as_string());
 2490   }
 2491   stop(buf);
 2492 }
 2493 
 2494 #define XSTATE_BV 0x200
 2495 
 2496 void MacroAssembler::pop_CPU_state() {

 3539 }
 3540 
 3541 // C++ bool manipulation
 3542 void MacroAssembler::testbool(Register dst) {
 3543   if(sizeof(bool) == 1)
 3544     testb(dst, 0xff);
 3545   else if(sizeof(bool) == 2) {
 3546     // testw implementation needed for two byte bools
 3547     ShouldNotReachHere();
 3548   } else if(sizeof(bool) == 4)
 3549     testl(dst, dst);
 3550   else
 3551     // unsupported
 3552     ShouldNotReachHere();
 3553 }
 3554 
 3555 void MacroAssembler::testptr(Register dst, Register src) {
 3556   testq(dst, src);
 3557 }
 3558 
 3559 // Object / value buffer allocation...
 3560 //
 3561 // Kills klass and rsi on LP64
 3562 void MacroAssembler::allocate_instance(Register klass, Register new_obj,
 3563                                        Register t1, Register t2,
 3564                                        bool clear_fields, Label& alloc_failed)
 3565 {
 3566   Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop;
 3567   Register layout_size = t1;
 3568   assert(new_obj == rax, "needs to be rax");
 3569   assert_different_registers(klass, new_obj, t1, t2);
 3570 
 3571   // get instance_size in InstanceKlass (scaled to a count of bytes)
 3572   movl(layout_size, Address(klass, Klass::layout_helper_offset()));
 3573   // test to see if it is malformed in some way
 3574   testl(layout_size, Klass::_lh_instance_slow_path_bit);
 3575   jcc(Assembler::notZero, slow_case_no_pop);
 3576 
 3577   // Allocate the instance:
 3578   //  If TLAB is enabled:
 3579   //    Try to allocate in the TLAB.
 3580   //    If fails, go to the slow path.
 3581   //  Else If inline contiguous allocations are enabled:
 3582   //    Try to allocate in eden.
 3583   //    If fails due to heap end, go to slow path.
 3584   //
 3585   //  If TLAB is enabled OR inline contiguous is enabled:
 3586   //    Initialize the allocation.
 3587   //    Exit.
 3588   //
 3589   //  Go to slow path.
 3590 
 3591   push(klass);
 3592   if (UseTLAB) {
 3593     tlab_allocate(new_obj, layout_size, 0, klass, t2, slow_case);
 3594     if (ZeroTLAB || (!clear_fields)) {
 3595       // the fields have been already cleared
 3596       jmp(initialize_header);
 3597     } else {
 3598       // initialize both the header and fields
 3599       jmp(initialize_object);
 3600     }
 3601   } else {
 3602     jmp(slow_case);
 3603   }
 3604 
 3605   // If UseTLAB is true, the object was allocated above and still needs to be initialized.
 3606   // Otherwise, skip straight to the slow path.
 3607   if (UseTLAB) {
 3608     if (clear_fields) {
 3609       // The object is initialized before the header.  If the object size is
 3610       // zero, go directly to the header initialization.
 3611       bind(initialize_object);
 3612       if (UseCompactObjectHeaders) {
 3613         assert(is_aligned(oopDesc::base_offset_in_bytes(), BytesPerLong), "oop base offset must be 8-byte-aligned");
 3614         decrement(layout_size, oopDesc::base_offset_in_bytes());
 3615       } else {
 3616         decrement(layout_size, sizeof(oopDesc));
 3617       }
 3618       jcc(Assembler::zero, initialize_header);
 3619 
 3620       // Initialize topmost object field, divide size by 8, check if odd and
 3621       // test if zero.
 3622       Register zero = klass;
 3623       xorl(zero, zero);    // use zero reg to clear memory (shorter code)
 3624       shrl(layout_size, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
 3625 
 3626   #ifdef ASSERT
 3627       // make sure instance_size was multiple of 8
 3628       Label L;
 3629       // Ignore partial flag stall after shrl() since it is debug VM
 3630       jcc(Assembler::carryClear, L);
 3631       stop("object size is not multiple of 2 - adjust this code");
 3632       bind(L);
 3633       // must be > 0, no extra check needed here
 3634   #endif
 3635 
 3636       // initialize remaining object fields: instance_size was a multiple of 8
 3637       {
 3638         Label loop;
 3639         bind(loop);
 3640         int header_size_bytes = oopDesc::header_size() * HeapWordSize;
 3641         assert(is_aligned(header_size_bytes, BytesPerLong), "oop header size must be 8-byte-aligned");
 3642         movptr(Address(new_obj, layout_size, Address::times_8, header_size_bytes - 1*oopSize), zero);
 3643         decrement(layout_size);
 3644         jcc(Assembler::notZero, loop);
 3645       }
 3646     } // clear_fields
 3647 
 3648     // initialize object header only.
 3649     bind(initialize_header);
 3650     if (UseCompactObjectHeaders || EnableValhalla) {
 3651       pop(klass);
 3652       Register mark_word = t2;
 3653       movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
 3654       movptr(Address(new_obj, oopDesc::mark_offset_in_bytes ()), mark_word);
 3655     } else {
 3656      movptr(Address(new_obj, oopDesc::mark_offset_in_bytes()),
 3657             (intptr_t)markWord::prototype().value()); // header
 3658      pop(klass);   // get saved klass back in the register.
 3659     }
 3660     if (!UseCompactObjectHeaders) {
 3661       xorl(rsi, rsi);                 // use zero reg to clear memory (shorter code)
 3662       store_klass_gap(new_obj, rsi);  // zero klass gap for compressed oops
 3663       movptr(t2, klass);         // preserve klass
 3664       store_klass(new_obj, t2, rscratch1);  // src klass reg is potentially compressed
 3665     }
 3666     jmp(done);
 3667   }
 3668 
 3669   bind(slow_case);
 3670   pop(klass);
 3671   bind(slow_case_no_pop);
 3672   jmp(alloc_failed);
 3673 
 3674   bind(done);
 3675 }
 3676 
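Stripped of register management, the fast path of allocate_instance above follows the sketch below; tlab_try_allocate_sketch and install_header_sketch are hypothetical stand-ins for the emitted sequences, not HotSpot APIs:

#include <string.h>

void* tlab_try_allocate_sketch(long size_bytes);   // hypothetical: TLAB bump allocation
void  install_header_sketch(void* obj);            // hypothetical: prototype mark word + klass

// Control-flow sketch of allocate_instance.
static void* allocate_instance_sketch(long size_bytes, long header_bytes, bool clear_fields) {
  void* obj = tlab_try_allocate_sketch(size_bytes);
  if (obj == nullptr) return nullptr;               // slow path / alloc_failed
  if (clear_fields) {
    memset((char*)obj + header_bytes, 0, (size_t)(size_bytes - header_bytes));
  }
  install_header_sketch(obj);
  return obj;
}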
 3677 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
 3678 void MacroAssembler::tlab_allocate(Register obj,
 3679                                    Register var_size_in_bytes,
 3680                                    int con_size_in_bytes,
 3681                                    Register t1,
 3682                                    Register t2,
 3683                                    Label& slow_case) {
 3684   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 3685   bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
 3686 }
 3687 
 3688 RegSet MacroAssembler::call_clobbered_gp_registers() {
 3689   RegSet regs;
 3690   regs += RegSet::of(rax, rcx, rdx);
 3691 #ifndef _WINDOWS
 3692   regs += RegSet::of(rsi, rdi);
 3693 #endif
 3694   regs += RegSet::range(r8, r11);
 3695   if (UseAPX) {
 3696     regs += RegSet::range(r16, as_Register(Register::number_of_registers - 1));

 3860   xorptr(temp, temp);    // use zero reg to clear memory (shorter code)
 3861   if (UseIncDec) {
 3862     shrptr(index, 3);  // divide by 8/16 and set carry flag if bit 2 was set
 3863   } else {
 3864     shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
 3865     shrptr(index, 1);
 3866   }
 3867 
 3868   // initialize remaining object fields: index is a multiple of 2 now
 3869   {
 3870     Label loop;
 3871     bind(loop);
 3872     movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
 3873     decrement(index);
 3874     jcc(Assembler::notZero, loop);
 3875   }
 3876 
 3877   bind(done);
 3878 }
 3879 
 3880 void MacroAssembler::get_inline_type_field_klass(Register holder_klass, Register index, Register inline_klass) {
 3881   inline_layout_info(holder_klass, index, inline_klass);
 3882   movptr(inline_klass, Address(inline_klass, InlineLayoutInfo::klass_offset()));
 3883 }
 3884 
 3885 void MacroAssembler::inline_layout_info(Register holder_klass, Register index, Register layout_info) {
 3886   movptr(layout_info, Address(holder_klass, InstanceKlass::inline_layout_info_array_offset()));
 3887 #ifdef ASSERT
 3888   {
 3889     Label done;
 3890     cmpptr(layout_info, 0);
 3891     jcc(Assembler::notEqual, done);
 3892     stop("inline_layout_info_array is null");
 3893     bind(done);
 3894   }
 3895 #endif
 3896 
 3897   InlineLayoutInfo array[2];
 3898   int size = (char*)&array[1] - (char*)&array[0]; // computing size of array elements
 3899   if (is_power_of_2(size)) {
 3900     shll(index, log2i_exact(size)); // Scale index by power of 2
 3901   } else {
 3902     imull(index, index, size); // Scale the index to be the entry index * array_element_size
 3903   }
 3904   lea(layout_info, Address(layout_info, index, Address::times_1, Array<InlineLayoutInfo>::base_offset_in_bytes()));
 3905 }
 3906 
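The two-element array above is a portable way to obtain the stride between consecutive InlineLayoutInfo array elements, padding included. Isolated (a sketch):

// Sketch: stride between consecutive array elements, i.e. sizeof(T)
// including any tail padding.
template <typename T> static long element_stride_sketch() {
  T array[2];
  return (long)((char*)&array[1] - (char*)&array[0]);
}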
 3907 // Look up the method for a megamorphic invokeinterface call.
 3908 // The target method is determined by <intf_klass, itable_index>.
 3909 // The receiver klass is in recv_klass.
 3910 // On success, the result will be in method_result, and execution falls through.
 3911 // On failure, execution transfers to the given label.
 3912 void MacroAssembler::lookup_interface_method(Register recv_klass,
 3913                                              Register intf_klass,
 3914                                              RegisterOrConstant itable_index,
 3915                                              Register method_result,
 3916                                              Register scan_temp,
 3917                                              Label& L_no_such_interface,
 3918                                              bool return_method) {
 3919   assert_different_registers(recv_klass, intf_klass, scan_temp);
 3920   assert_different_registers(method_result, intf_klass, scan_temp);
 3921   assert(recv_klass != method_result || !return_method,
 3922          "recv_klass can be destroyed when method isn't needed");
 3923 
 3924   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
 3925          "caller must use same register for non-constant itable index as for method");
 3926 

 4937   } else {
 4938     Label L;
 4939     jccb(negate_condition(cc), L);
 4940     movl(dst, src);
 4941     bind(L);
 4942   }
 4943 }
 4944 
 4945 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
 4946   if (VM_Version::supports_cmov()) {
 4947     cmovl(cc, dst, src);
 4948   } else {
 4949     Label L;
 4950     jccb(negate_condition(cc), L);
 4951     movl(dst, src);
 4952     bind(L);
 4953   }
 4954 }
 4955 
 4956 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
 4957   if (!VerifyOops || VerifyAdapterSharing) {
 4958     // The code string address pushed below confuses VerifyAdapterSharing
 4959     // because it may differ between otherwise equivalent adapters.
 4960     return;
 4961   }
 4962 
 4963   BLOCK_COMMENT("verify_oop {");
 4964   push(rscratch1);
 4965   push(rax);                          // save rax
 4966   push(reg);                          // pass register argument
 4967 
 4968   // Pass register number to verify_oop_subroutine
 4969   const char* b = nullptr;
 4970   {
 4971     ResourceMark rm;
 4972     stringStream ss;
 4973     ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
 4974     b = code_string(ss.as_string());
 4975   }
 4976   AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
 4977   pushptr(buffer.addr(), rscratch1);
 4978 
 4979   // call indirectly to solve generation ordering problem
 4980   movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
 4981   call(rax);

 5000   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
 5001   int stackElementSize = Interpreter::stackElementSize;
 5002   int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
 5003 #ifdef ASSERT
 5004   int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
 5005   assert(offset1 - offset == stackElementSize, "correct arithmetic");
 5006 #endif
 5007   Register             scale_reg    = noreg;
 5008   Address::ScaleFactor scale_factor = Address::no_scale;
 5009   if (arg_slot.is_constant()) {
 5010     offset += arg_slot.as_constant() * stackElementSize;
 5011   } else {
 5012     scale_reg    = arg_slot.as_register();
 5013     scale_factor = Address::times(stackElementSize);
 5014   }
 5015   offset += wordSize;           // return PC is on stack
 5016   return Address(rsp, scale_reg, scale_factor, offset);
 5017 }
 5018 
 5019 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
 5020   if (!VerifyOops || VerifyAdapterSharing) {
 5021     // The code string address pushed below confuses VerifyAdapterSharing
 5022     // because it may differ between otherwise equivalent adapters.
 5023     return;
 5024   }
 5025 
 5026   push(rscratch1);
 5027   push(rax); // save rax,
 5028   // addr may contain rsp so we will have to adjust it based on the push
 5029   // we just did (and on 64 bit we do two pushes)
 5030   // NOTE: the 64-bit code once had a bug here: it did movq(addr, rax), which
 5031   // stores rax into addr, the reverse of what was intended.
 5032   if (addr.uses(rsp)) {
 5033     lea(rax, addr);
 5034     pushptr(Address(rax, 2 * BytesPerWord));
 5035   } else {
 5036     pushptr(addr);
 5037   }
 5038 
 5039   // Pass register number to verify_oop_subroutine
 5040   const char* b = nullptr;
 5041   {
 5042     ResourceMark rm;
 5043     stringStream ss;
 5044     ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);

 5398 
 5399 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
 5400   // get mirror
 5401   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 5402   load_method_holder(mirror, method);
 5403   movptr(mirror, Address(mirror, mirror_offset));
 5404   resolve_oop_handle(mirror, tmp);
 5405 }
 5406 
 5407 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
 5408   load_method_holder(rresult, rmethod);
 5409   movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
 5410 }
 5411 
 5412 void MacroAssembler::load_method_holder(Register holder, Register method) {
 5413   movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
 5414   movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
 5415   movptr(holder, Address(holder, ConstantPool::pool_holder_offset()));          // InstanceKlass*
 5416 }
 5417 
 5418 void MacroAssembler::load_metadata(Register dst, Register src) {
 5419   if (UseCompactObjectHeaders) {
 5420     load_narrow_klass_compact(dst, src);
 5421   } else if (UseCompressedClassPointers) {
 5422     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5423   } else {
 5424     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5425   }
 5426 }
 5427 
 5428 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
 5429   assert(UseCompactObjectHeaders, "expect compact object headers");
 5430   movq(dst, Address(src, oopDesc::mark_offset_in_bytes()));
 5431   shrq(dst, markWord::klass_shift);
 5432 }
 5433 
 5434 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
 5435   assert_different_registers(src, tmp);
 5436   assert_different_registers(dst, tmp);
 5437 
 5438   if (UseCompactObjectHeaders) {
 5439     load_narrow_klass_compact(dst, src);
 5440     decode_klass_not_null(dst, tmp);
 5441   } else if (UseCompressedClassPointers) {
 5442     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5443     decode_klass_not_null(dst, tmp);
 5444   } else {
 5445     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5446   }
 5447 }
 5448 
 5449 void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) {
 5450   load_klass(dst, src, tmp);
 5451   movptr(dst, Address(dst, Klass::prototype_header_offset()));
 5452 }
 5453 
 5454 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
 5455   assert(!UseCompactObjectHeaders, "not with compact headers");
 5456   assert_different_registers(src, tmp);
 5457   assert_different_registers(dst, tmp);
 5458   if (UseCompressedClassPointers) {
 5459     encode_klass_not_null(src, tmp);
 5460     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5461   } else {
 5462     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5463   }
 5464 }
 5465 
 5466 void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
 5467   if (UseCompactObjectHeaders) {
 5468     assert(tmp != noreg, "need tmp");
 5469     assert_different_registers(klass, obj, tmp);
 5470     load_narrow_klass_compact(tmp, obj);
 5471     cmpl(klass, tmp);
 5472   } else if (UseCompressedClassPointers) {
 5473     cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes()));

 5499   bool as_raw = (decorators & AS_RAW) != 0;
 5500   if (as_raw) {
 5501     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1);
 5502   } else {
 5503     bs->load_at(this, decorators, type, dst, src, tmp1);
 5504   }
 5505 }
 5506 
 5507 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
 5508                                      Register tmp1, Register tmp2, Register tmp3) {
 5509   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5510   decorators = AccessInternal::decorator_fixup(decorators, type);
 5511   bool as_raw = (decorators & AS_RAW) != 0;
 5512   if (as_raw) {
 5513     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5514   } else {
 5515     bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5516   }
 5517 }
 5518 
 5519 void MacroAssembler::flat_field_copy(DecoratorSet decorators, Register src, Register dst,
 5520                                      Register inline_layout_info) {
 5521   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5522   bs->flat_field_copy(this, decorators, src, dst, inline_layout_info);
 5523 }
 5524 
 5525 void MacroAssembler::payload_offset(Register inline_klass, Register offset) {
 5526   movptr(offset, Address(inline_klass, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 5527   movl(offset, Address(offset, InlineKlass::payload_offset_offset()));
 5528 }
 5529 
 5530 void MacroAssembler::payload_addr(Register oop, Register data, Register inline_klass) {
 5531   // ((address) (void*) o) + vk->payload_offset();
 5532   Register offset = (data == oop) ? rscratch1 : data;
 5533   payload_offset(inline_klass, offset);
 5534   if (data == oop) {
 5535     addptr(data, offset);
 5536   } else {
 5537     lea(data, Address(oop, offset));
 5538   }
 5539 }
 5540 
 5541 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass,
 5542                                                 Register index, Register data) {
 5543   assert(index != rcx, "index needs to shift by rcx");
 5544   assert_different_registers(array, array_klass, index);
 5545   assert_different_registers(rcx, array, index);
 5546 
 5547   // array->base() + (index << Klass::layout_helper_log2_element_size(lh));
 5548   movl(rcx, Address(array_klass, Klass::layout_helper_offset()));
 5549 
 5550   // Klass::layout_helper_log2_element_size(lh)
 5551   // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
 5552   shrl(rcx, Klass::_lh_log2_element_size_shift);
 5553   andl(rcx, Klass::_lh_log2_element_size_mask);
 5554   shlptr(index); // index << rcx
 5555 
 5556   lea(data, Address(array, index, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT)));
 5557 }
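
      // Worked example (element size assumed for illustration): for a flat array
      // with 8-byte elements the layout helper yields log2_element_size = 3, so
      // index = 5 computes
      //   data = array + base_offset + (5 << 3)
      // i.e. the sixth element starts 40 bytes past the payload base. shlptr with
      // no count operand shifts implicitly by cl, hence the rcx asserts above.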
 5558 
 5559 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
 5560   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1);
 5561 }
 5562 
 5563 // Does no verification; generates fixed-size code
 5564 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
 5565   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1);
 5566 }
 5567 
 5568 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
 5569                                     Register tmp2, Register tmp3, DecoratorSet decorators) {
 5570   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
 5571 }
 5572 
 5573 // Used for storing nulls.
 5574 void MacroAssembler::store_heap_oop_null(Address dst) {
 5575   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
 5576 }
 5577 
 5578 void MacroAssembler::store_klass_gap(Register dst, Register src) {

 5898   Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 5899 }
 5900 
 5901 void MacroAssembler::reinit_heapbase() {
 5902   if (UseCompressedOops) {
 5903     if (Universe::heap() != nullptr) {
 5904       if (CompressedOops::base() == nullptr) {
 5905         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
 5906       } else {
 5907         mov64(r12_heapbase, (int64_t)CompressedOops::base());
 5908       }
 5909     } else {
 5910       movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr()));
 5911     }
 5912   }
 5913 }
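
      // For reference, r12_heapbase caches the compressed-oops base used by the
      // standard base+shift decode (a sketch, assuming the usual scheme):
      //   oop o = (oop)(CompressedOops::base() +
      //                 ((uintptr_t)narrow << CompressedOops::shift()));
      // A zero base (zero-based heap) lets the xorptr above keep r12 at 0 and the
      // same pattern still decodes correctly; before the heap exists, the base is
      // loaded indirectly through base_addr().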
 5914 
 5915 #if COMPILER2_OR_JVMCI
 5916 
 5917 // Clear 'cnt' qwords of memory starting at 'base', using XMM/YMM/ZMM registers
 5918 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, KRegister mask) {
 5919   // cnt - number of qwords (8-byte words).
 5920   // base - start address, qword aligned.
 5921   Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
 5922   bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
 5923   if (use64byteVector) {
 5924     evpbroadcastq(xtmp, val, AVX_512bit);
 5925   } else if (MaxVectorSize >= 32) {
 5926     movdq(xtmp, val);
 5927     punpcklqdq(xtmp, xtmp);
 5928     vinserti128_high(xtmp, xtmp);
 5929   } else {
 5930     movdq(xtmp, val);
 5931     punpcklqdq(xtmp, xtmp);
 5932   }
 5933   jmp(L_zero_64_bytes);
 5934 
 5935   BIND(L_loop);
 5936   if (MaxVectorSize >= 32) {
 5937     fill64(base, 0, xtmp, use64byteVector);
 5938   } else {
 5939     movdqu(Address(base,  0), xtmp);
 5940     movdqu(Address(base, 16), xtmp);
 5941     movdqu(Address(base, 32), xtmp);
 5942     movdqu(Address(base, 48), xtmp);
 5943   }
 5944   addptr(base, 64);
 5945 
 5946   BIND(L_zero_64_bytes);
 5947   subptr(cnt, 8);
 5948   jccb(Assembler::greaterEqual, L_loop);
 5949 
 5950   // Clear the trailing bytes (< 64)
 5951   if (use64byteVector) {
 5952     addptr(cnt, 8);
 5953     jccb(Assembler::equal, L_end);
 5954     fill64_masked(3, base, 0, xtmp, mask, cnt, val, true);
 5955     jmp(L_end);
 5956   } else {
 5957     addptr(cnt, 4);
 5958     jccb(Assembler::less, L_tail);
 5959     if (MaxVectorSize >= 32) {
 5960       vmovdqu(Address(base, 0), xtmp);
 5961     } else {
 5962       movdqu(Address(base,  0), xtmp);
 5963       movdqu(Address(base, 16), xtmp);
 5964     }
 5965   }
 5966   addptr(base, 32);
 5967   subptr(cnt, 4);
 5968 
 5969   BIND(L_tail);
 5970   addptr(cnt, 4);
 5971   jccb(Assembler::lessEqual, L_end);
 5972   if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
 5973     fill32_masked(3, base, 0, xtmp, mask, cnt, val);
 5974   } else {
 5975     decrement(cnt);
 5976 
 5977     BIND(L_sloop);
 5978     movq(Address(base, 0), xtmp);
 5979     addptr(base, 8);
 5980     decrement(cnt);
 5981     jccb(Assembler::greaterEqual, L_sloop);
 5982   }
 5983   BIND(L_end);
 5984 }
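
      // Worked example of the flow above (MaxVectorSize >= 32, no 64-byte
      // vectors): cnt = 13 qwords (104 bytes). The main loop clears one 64-byte
      // block (cnt 13 -> 5), the next subptr goes negative (5 - 8 = -3), the
      // 32-byte step applies (-3 + 4 = 1 >= 0) and clears 4 more qwords, and the
      // scalar tail stores the final qword: 64 + 32 + 8 = 104 bytes in total.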
 5985 
 5986 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
 5987   assert(InlineTypeReturnedAsFields, "Inline types should never be returned as fields");
 5988   // An inline type might be returned. If fields are in registers we
 5989   // need to allocate an inline type instance and initialize it with
 5990   // the value of the fields.
 5991   Label skip;
 5992   // We only need a new buffered inline type if one was not already returned; bit 0 of rax is set in that case
 5993   testptr(rax, 1);
 5994   jcc(Assembler::zero, skip);
 5995   int call_offset = -1;
 5996 
 5997 #ifdef _LP64
 5998   // The following code is similar to allocate_instance but has some slight differences,
 5999   // e.g. the object size is never zero (and sometimes constant), and storing the klass
 6000   // pointer after allocation is unnecessary if vk != nullptr. allocate_instance is not aware of these differences.
 6001   Label slow_case;
 6002   // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
 6003   mov(rscratch1, rax); // save rax for slow_case since *_allocate may corrupt it when allocation failed
 6004   if (vk != nullptr) {
 6005     // Called from C1, where the return type is statically known.
 6006     movptr(rbx, (intptr_t)vk->get_InlineKlass());
 6007     jint lh = vk->layout_helper();
 6008     assert(lh != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
 6009     if (UseTLAB && !Klass::layout_helper_needs_slow_path(lh)) {
 6010       tlab_allocate(rax, noreg, lh, r13, r14, slow_case);
 6011     } else {
 6012       jmp(slow_case);
 6013     }
 6014   } else {
 6015     // Called from the interpreter. RAX contains ((the InlineKlass* of the return type) | 0x01)
 6016     mov(rbx, rax);
 6017     andptr(rbx, -2);
 6018     if (UseTLAB) {
 6019       movl(r14, Address(rbx, Klass::layout_helper_offset()));
 6020       testl(r14, Klass::_lh_instance_slow_path_bit);
 6021       jcc(Assembler::notZero, slow_case);
 6022       tlab_allocate(rax, r14, 0, r13, r14, slow_case);
 6023     } else {
 6024       jmp(slow_case);
 6025     }
 6026   }
 6027   if (UseTLAB) {
 6028     // 2. Initialize buffered inline instance header
 6029     Register buffer_obj = rax;
 6030     Register klass = rbx;
 6031     if (UseCompactObjectHeaders) {
 6032       Register mark_word = r13;
 6033       movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
 6034       movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), mark_word);
 6035     } else {
 6036       movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), (intptr_t)markWord::inline_type_prototype().value());
 6037       xorl(r13, r13);
 6038       store_klass_gap(buffer_obj, r13);
 6039       if (vk == nullptr) {
 6040         // store_klass corrupts rbx(klass), so save it in r13 for later use (interpreter case only).
 6041         mov(r13, klass);
 6042       }
 6043       store_klass(buffer_obj, klass, rscratch1);
 6044       klass = r13;
 6045     }
 6046     // 3. Initialize its fields with an inline class specific handler
 6047     if (vk != nullptr) {
 6048       call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
 6049     } else {
 6050       movptr(rbx, Address(klass, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 6051       movptr(rbx, Address(rbx, InlineKlass::pack_handler_offset()));
 6052       call(rbx);
 6053     }
 6054     jmp(skip);
 6055   }
 6056   bind(slow_case);
 6057   // We failed to allocate a new inline type; fall back to a runtime
 6058   // call. Some oop fields may be live in registers, but we cannot
 6059   // tell which. The runtime call will take care of preserving them
 6060   // across a GC, if one occurs.
 6061   mov(rax, rscratch1);
 6062 #endif
 6063 
 6064   if (from_interpreter) {
 6065     super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
 6066   } else {
 6067     call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
 6068     call_offset = offset();
 6069   }
 6070 
 6071   bind(skip);
 6072   return call_offset;
 6073 }
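
      // Return-value protocol recap, as implied by the code above: rax either
      // holds a buffered inline-type oop (bit 0 clear, nothing to do here) or
      // (InlineKlass* | 0x01) with the field values live in registers. In the
      // latter case a buffer is allocated and the klass' pack handler scatters
      // the register values into it, falling back to the runtime call on
      // allocation failure.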
 6074 
 6075 // Move a value between registers/stack slots and update the reg_state
 6076 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
 6077   assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
 6078   if (reg_state[to->value()] == reg_written) {
 6079     return true; // Already written
 6080   }
 6081   if (from != to && bt != T_VOID) {
 6082     if (reg_state[to->value()] == reg_readonly) {
 6083       return false; // Not yet writable
 6084     }
 6085     if (from->is_reg()) {
 6086       if (to->is_reg()) {
 6087         if (from->is_XMMRegister()) {
 6088           if (bt == T_DOUBLE) {
 6089             movdbl(to->as_XMMRegister(), from->as_XMMRegister());
 6090           } else {
 6091             assert(bt == T_FLOAT, "must be float");
 6092             movflt(to->as_XMMRegister(), from->as_XMMRegister());
 6093           }
 6094         } else {
 6095           movq(to->as_Register(), from->as_Register());
 6096         }
 6097       } else {
 6098         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6099         Address to_addr = Address(rsp, st_off);
 6100         if (from->is_XMMRegister()) {
 6101           if (bt == T_DOUBLE) {
 6102             movdbl(to_addr, from->as_XMMRegister());
 6103           } else {
 6104             assert(bt == T_FLOAT, "must be float");
 6105             movflt(to_addr, from->as_XMMRegister());
 6106           }
 6107         } else {
 6108           movq(to_addr, from->as_Register());
 6109         }
 6110       }
 6111     } else {
 6112       Address from_addr = Address(rsp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize);
 6113       if (to->is_reg()) {
 6114         if (to->is_XMMRegister()) {
 6115           if (bt == T_DOUBLE) {
 6116             movdbl(to->as_XMMRegister(), from_addr);
 6117           } else {
 6118             assert(bt == T_FLOAT, "must be float");
 6119             movflt(to->as_XMMRegister(), from_addr);
 6120           }
 6121         } else {
 6122           movq(to->as_Register(), from_addr);
 6123         }
 6124       } else {
 6125         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6126         movq(r13, from_addr);
 6127         movq(Address(rsp, st_off), r13);
 6128       }
 6129     }
 6130   }
 6131   // Update register states
 6132   reg_state[from->value()] = reg_writable;
 6133   reg_state[to->value()] = reg_written;
 6134   return true;
 6135 }
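
      // reg_state transitions as used by this helper (a summary of the code
      // above, not new behavior):
      //   reg_readonly : slot still holds an unread source value; writes must wait
      //   reg_writable : source value has been consumed; slot may be overwritten
      //   reg_written  : destination value already placed; nothing left to do
      // A successful move marks 'from' writable and 'to' written, so a move that
      // fails on a reg_readonly destination can succeed on a later pass.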
 6136 
 6137 // Calculate the extra stack space required for packing or unpacking inline
 6138 // args and adjust the stack pointer
 6139 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
 6140   // Two additional slots to account for the return address
 6141   int sp_inc = (args_on_stack + 2) * VMRegImpl::stack_slot_size;
 6142   sp_inc = align_up(sp_inc, StackAlignmentInBytes);
 6143   // Save the return address, adjust the stack (make sure it is properly
 6144   // 16-byte aligned) and copy the return address to the new top of the stack.
 6145   // The stack will be repaired on return (see MacroAssembler::remove_frame).
 6146   assert(sp_inc > 0, "sanity");
 6147   pop(r13);
 6148   subptr(rsp, sp_inc);
 6149   push(r13);
 6150   return sp_inc;
 6151 }
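
      // Worked example (typical x86-64 values assumed): args_on_stack = 3 gives
      // sp_inc = (3 + 2) * 4 = 20 bytes (stack slots are 4 bytes), rounded up to
      // 32 by StackAlignmentInBytes = 16. The return address is popped, rsp is
      // dropped by 32, and the return address is pushed back on the new top.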
 6152 
 6153 // Read all fields from an inline type buffer and store the field values in registers/stack slots.
 6154 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
 6155                                           VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
 6156                                           RegState reg_state[]) {
 6157   assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
 6158   assert(from->is_valid(), "source must be valid");
 6159   bool progress = false;
 6160 #ifdef ASSERT
 6161   const int start_offset = offset();
 6162 #endif
 6163 
 6164   Label L_null, L_notNull;
 6165   // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
 6166   Register tmp1 = r10;
 6167   Register tmp2 = r13;
 6168   Register fromReg = noreg;
 6169   ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, -1);
 6170   bool done = true;
 6171   bool mark_done = true;
 6172   VMReg toReg;
 6173   BasicType bt;
 6174   // Check if argument requires a null check
 6175   bool null_check = false;
 6176   VMReg nullCheckReg;
 6177   while (stream.next(nullCheckReg, bt)) {
 6178     if (sig->at(stream.sig_index())._offset == -1) {
 6179       null_check = true;
 6180       break;
 6181     }
 6182   }
 6183   stream.reset(sig_index, to_index);
 6184   while (stream.next(toReg, bt)) {
 6185     assert(toReg->is_valid(), "destination must be valid");
 6186     int idx = (int)toReg->value();
 6187     if (reg_state[idx] == reg_readonly) {
 6188       if (idx != from->value()) {
 6189         mark_done = false;
 6190       }
 6191       done = false;
 6192       continue;
 6193     } else if (reg_state[idx] == reg_written) {
 6194       continue;
 6195     }
 6196     assert(reg_state[idx] == reg_writable, "must be writable");
 6197     reg_state[idx] = reg_written;
 6198     progress = true;
 6199 
 6200     if (fromReg == noreg) {
 6201       if (from->is_reg()) {
 6202         fromReg = from->as_Register();
 6203       } else {
 6204         int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6205         movq(tmp1, Address(rsp, st_off));
 6206         fromReg = tmp1;
 6207       }
 6208       if (null_check) {
 6209         // Nullable inline type argument; emit a null check
 6210         testptr(fromReg, fromReg);
 6211         jcc(Assembler::zero, L_null);
 6212       }
 6213     }
 6214     int off = sig->at(stream.sig_index())._offset;
 6215     if (off == -1) {
 6216       assert(null_check, "Missing null check");
 6217       if (toReg->is_stack()) {
 6218         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6219         movq(Address(rsp, st_off), 1);
 6220       } else {
 6221         movq(toReg->as_Register(), 1);
 6222       }
 6223       continue;
 6224     }
 6225     assert(off > 0, "offset in object should be positive");
 6226     Address fromAddr = Address(fromReg, off);
 6227     if (!toReg->is_XMMRegister()) {
 6228       Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
 6229       if (is_reference_type(bt)) {
 6230         load_heap_oop(dst, fromAddr);
 6231       } else {
 6232         bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
 6233         load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
 6234       }
 6235       if (toReg->is_stack()) {
 6236         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6237         movq(Address(rsp, st_off), dst);
 6238       }
 6239     } else if (bt == T_DOUBLE) {
 6240       movdbl(toReg->as_XMMRegister(), fromAddr);
 6241     } else {
 6242       assert(bt == T_FLOAT, "must be float");
 6243       movflt(toReg->as_XMMRegister(), fromAddr);
 6244     }
 6245   }
 6246   if (progress && null_check) {
 6247     if (done) {
 6248       jmp(L_notNull);
 6249       bind(L_null);
 6250       // Set null marker to zero to signal that the argument is null.
 6251       // Also set all oop fields to zero to make the GC happy.
 6252       stream.reset(sig_index, to_index);
 6253       while (stream.next(toReg, bt)) {
 6254         if (sig->at(stream.sig_index())._offset == -1 ||
 6255             bt == T_OBJECT || bt == T_ARRAY) {
 6256           if (toReg->is_stack()) {
 6257             int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6258             movq(Address(rsp, st_off), 0);
 6259           } else {
 6260             xorq(toReg->as_Register(), toReg->as_Register());
 6261           }
 6262         }
 6263       }
 6264       bind(L_notNull);
 6265     } else {
 6266       bind(L_null);
 6267     }
 6268   }
 6269 
 6270   sig_index = stream.sig_index();
 6271   to_index = stream.regs_index();
 6272 
 6273   if (mark_done && reg_state[from->value()] != reg_written) {
 6274     // This is okay because no one else will write to that slot
 6275     reg_state[from->value()] = reg_writable;
 6276   }
 6277   from_index--;
 6278   assert(progress || (start_offset == offset()), "should not emit code");
 6279   return done;
 6280 }
 6281 
 6282 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
 6283                                         VMRegPair* from, int from_count, int& from_index, VMReg to,
 6284                                         RegState reg_state[], Register val_array) {
 6285   assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter");
 6286   assert(to->is_valid(), "destination must be valid");
 6287 
 6288   if (reg_state[to->value()] == reg_written) {
 6289     skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
 6290     return true; // Already written
 6291   }
 6292 
 6293   // TODO 8284443 Isn't it an issue if the code below uses r14 as tmp while it contains a spilled value?
 6294   // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
 6295   Register val_obj_tmp = r11;
 6296   Register from_reg_tmp = r14;
 6297   Register tmp1 = r10;
 6298   Register tmp2 = r13;
 6299   Register tmp3 = rbx;
 6300   Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();
 6301 
 6302   assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);
 6303 
 6304   if (reg_state[to->value()] == reg_readonly) {
 6305     if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
 6306       skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
 6307       return false; // Not yet writable
 6308     }
 6309     val_obj = val_obj_tmp;
 6310   }
 6311 
 6312   int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT);
 6313   load_heap_oop(val_obj, Address(val_array, index));
 6314 
 6315   ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
 6316   VMReg fromReg;
 6317   BasicType bt;
 6318   Label L_null;
 6319   while (stream.next(fromReg, bt)) {
 6320     assert(fromReg->is_valid(), "source must be valid");
 6321     reg_state[fromReg->value()] = reg_writable;
 6322 
 6323     int off = sig->at(stream.sig_index())._offset;
 6324     if (off == -1) {
 6325       // Nullable inline type argument; emit a null check
 6326       Label L_notNull;
 6327       if (fromReg->is_stack()) {
 6328         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6329         testb(Address(rsp, ld_off), 1);
 6330       } else {
 6331         testb(fromReg->as_Register(), 1);
 6332       }
 6333       jcc(Assembler::notZero, L_notNull);
 6334       movptr(val_obj, 0);
 6335       jmp(L_null);
 6336       bind(L_notNull);
 6337       continue;
 6338     }
 6339 
 6340     assert(off > 0, "offset in object should be positive");
 6341     size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
 6342 
 6343     // Pack the scalarized field into the value object.
 6344     Address dst(val_obj, off);
 6345     if (!fromReg->is_XMMRegister()) {
 6346       Register src;
 6347       if (fromReg->is_stack()) {
 6348         src = from_reg_tmp;
 6349         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6350         load_sized_value(src, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
 6351       } else {
 6352         src = fromReg->as_Register();
 6353       }
 6354       assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
 6355       if (is_reference_type(bt)) {
 6356         store_heap_oop(dst, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
 6357       } else {
 6358         store_sized_value(dst, src, size_in_bytes);
 6359       }
 6360     } else if (bt == T_DOUBLE) {
 6361       movdbl(dst, fromReg->as_XMMRegister());
 6362     } else {
 6363       assert(bt == T_FLOAT, "must be float");
 6364       movflt(dst, fromReg->as_XMMRegister());
 6365     }
 6366   }
 6367   bind(L_null);
 6368   sig_index = stream.sig_index();
 6369   from_index = stream.regs_index();
 6370 
 6371   assert(reg_state[to->value()] == reg_writable, "must have already been read");
 6372   bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
 6373   assert(success, "to register must be writable");
 6374   return true;
 6375 }
 6376 
 6377 VMReg MacroAssembler::spill_reg_for(VMReg reg) {
 6378   return reg->is_XMMRegister() ? xmm8->as_VMReg() : r14->as_VMReg();
 6379 }
 6380 
 6381 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
 6382   assert((initial_framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
 6383   if (needs_stack_repair) {
 6384     // TODO 8284443 Add a comment drawing the frame like in the AArch64 version of MacroAssembler::remove_frame
 6385     movq(rbp, Address(rsp, initial_framesize));
 6386     // The stack increment resides just below the saved rbp
 6387     addq(rsp, Address(rsp, initial_framesize - wordSize));
 6388   } else {
 6389     if (initial_framesize > 0) {
 6390       addq(rsp, initial_framesize);
 6391     }
 6392     pop(rbp);
 6393   }
 6394 }
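
      // Frame sketch for the needs_stack_repair case, inferred from the two
      // loads above (see the AArch64 version for the authoritative drawing):
      //   [rsp + initial_framesize]            saved rbp
      //   [rsp + initial_framesize - wordSize] sp increment, covering the fixed
      //                                        frame and any extension added for
      //                                        scalarized inline-type args
      // rbp is restored from the first slot, then rsp advances by the stored
      // increment, removing frame and extension in one step.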
 6395 
 6396 // Clearing constant sized memory using YMM/ZMM registers.
 6397 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 6398   assert(UseAVX > 2 && VM_Version::supports_avx512vl(), "");
 6399   bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
 6400 
 6401   int vector64_count = (cnt & (~0x7)) >> 3;
 6402   cnt = cnt & 0x7;
 6403   const int fill64_per_loop = 4;
 6404   const int max_unrolled_fill64 = 8;
 6405 
 6406   // 64 byte initialization loop.
 6407   vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit);
 6408   int start64 = 0;
 6409   if (vector64_count > max_unrolled_fill64) {
 6410     Label LOOP;
 6411     Register index = rtmp;
 6412 
 6413     start64 = vector64_count - (vector64_count % fill64_per_loop);
 6414 
 6415     movl(index, 0);

 6465         break;
 6466       case 7:
 6467         if (use64byteVector) {
 6468           movl(rtmp, 0x7F);
 6469           kmovwl(mask, rtmp);
 6470           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 6471         } else {
 6472           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 6473           movl(rtmp, 0x7);
 6474           kmovwl(mask, rtmp);
 6475           evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
 6476         }
 6477         break;
 6478       default:
 6479         fatal("Unexpected length: %d\n", cnt);
 6480         break;
 6481     }
 6482   }
 6483 }
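
      // Worked example: cnt = 23 qwords gives vector64_count = (23 & ~7) >> 3 = 2
      // full 64-byte stores with a 7-qword remainder. The case 7 tail above then
      // emits either one 512-bit store under mask 0x7F (7 of 8 qword lanes) or,
      // without 64-byte vectors, an unmasked 256-bit store (4 qwords) followed by
      // a 256-bit store under mask 0x7 (the remaining 3 qwords).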
 6484 
 6485 void MacroAssembler::clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp,
 6486                                bool is_large, bool word_copy_only, KRegister mask) {
 6487   // cnt      - number of qwords (8-byte words).
 6488   // base     - start address, qword aligned.
 6489   // is_large - set if the optimizer knows cnt is larger than InitArrayShortSize
 6490   assert(base == rdi, "base register must be rdi for rep stos");
 6491   assert(val  == rax, "val register must be rax for rep stos");
 6492   assert(cnt  == rcx, "cnt register must be rcx for rep stos");
 6493   assert(InitArrayShortSize % BytesPerLong == 0,
 6494     "InitArrayShortSize should be a multiple of BytesPerLong");
 6495 
 6496   Label DONE;
 6497 
 6498   if (!is_large) {
 6499     Label LOOP, LONG;
 6500     cmpptr(cnt, InitArrayShortSize/BytesPerLong);
 6501     jccb(Assembler::greater, LONG);
 6502 
 6503     decrement(cnt);
 6504     jccb(Assembler::negative, DONE); // Zero length
 6505 
 6506     // Use individual pointer-sized stores for small counts:
 6507     BIND(LOOP);
 6508     movptr(Address(base, cnt, Address::times_ptr), val);
 6509     decrement(cnt);
 6510     jccb(Assembler::greaterEqual, LOOP);
 6511     jmpb(DONE);
 6512 
 6513     BIND(LONG);
 6514   }
 6515 
 6516   // Use longer rep-prefixed ops for non-small counts:
 6517   if (UseFastStosb && !word_copy_only) {
 6518     shlptr(cnt, 3); // convert to number of bytes
 6519     rep_stosb();
 6520   } else if (UseXMMForObjInit) {
 6521     xmm_clear_mem(base, cnt, val, xtmp, mask);
 6522   } else {
 6523     rep_stos();
 6524   }
 6525 
 6526   BIND(DONE);
 6527 }
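
      // Strategy recap: counts of at most InitArrayShortSize / BytesPerLong
      // qwords (8 with an assumed 64-byte threshold) take the short store loop;
      // larger counts use rep stosb after converting cnt to bytes, XMM clearing
      // under UseXMMForObjInit, or a plain qword rep stos. The fixed rdi/rax/rcx
      // assignment asserted on entry is what the rep string instructions require.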
 6528 
 6529 #endif //COMPILER2_OR_JVMCI
 6530 
 6531 
 6532 void MacroAssembler::generate_fill(BasicType t, bool aligned,
 6533                                    Register to, Register value, Register count,
 6534                                    Register rtmp, XMMRegister xtmp) {
 6535   ShortBranchVerifier sbv(this);
 6536   assert_different_registers(to, value, count, rtmp);
 6537   Label L_exit;
 6538   Label L_fill_2_bytes, L_fill_4_bytes;
 6539 
 6540 #if defined(COMPILER2)
 6541   if (MaxVectorSize >= 32 &&

10421 
10422   // Load top.
10423   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10424 
10425   // Check if the lock-stack is full.
10426   cmpl(top, LockStack::end_offset());
10427   jcc(Assembler::greaterEqual, slow);
10428 
10429   // Check for recursion.
10430   cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
10431   jcc(Assembler::equal, push);
10432 
10433   // Check header for monitor (0b10).
10434   testptr(reg_rax, markWord::monitor_value);
10435   jcc(Assembler::notZero, slow);
10436 
10437   // Try to lock. Transition lock bits 0b01 => 0b00
10438   movptr(tmp, reg_rax);
10439   andptr(tmp, ~(int32_t)markWord::unlocked_value);
10440   orptr(reg_rax, markWord::unlocked_value);
10441   // Mask the inline_type bit so that we take the slow path if the object is an inline type
10442   andptr(reg_rax, ~((int) markWord::inline_type_bit_in_place));
10443 
10444   lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
10445   jcc(Assembler::notEqual, slow);
10446 
10447   // Restore top; the CAS clobbers the register.
10448   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10449 
10450   bind(push);
10451   // After successful lock, push object on lock-stack.
10452   movptr(Address(thread, top), obj);
10453   incrementl(top, oopSize);
10454   movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
10455 }
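
      // Mark-word recap for the fast path above (standard HotSpot lock bits in
      // the two low bits): 0b01 = unlocked, 0b00 = fast-locked, 0b10 = monitor.
      // The cmpxchg publishes tmp (mark with 0b01 cleared, i.e. locked) while
      // expecting reg_rax (mark with 0b01 set). Clearing the inline_type bit in
      // the expected value forces the CAS to fail for inline types, whose marks
      // always carry that bit, so they divert to the slow path.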
10456 
10457 // Implements lightweight-unlocking.
10458 //
10459 // obj: the object to be unlocked
10460 // reg_rax: rax
10461 // thread: the thread
10462 // tmp: a temporary register
10463 void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register tmp, Label& slow) {