src/hotspot/cpu/x86/macroAssembler_x86.cpp

    1 /*
    2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
    3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    4  *
    5  * This code is free software; you can redistribute it and/or modify it
    6  * under the terms of the GNU General Public License version 2 only, as
    7  * published by the Free Software Foundation.
    8  *
    9  * This code is distributed in the hope that it will be useful, but WITHOUT
   10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   12  * version 2 for more details (a copy is included in the LICENSE file that
   13  * accompanied this code).
   14  *
   15  * You should have received a copy of the GNU General Public License version
   16  * 2 along with this work; if not, write to the Free Software Foundation,
   17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   18  *
   19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   20  * or visit www.oracle.com if you need additional information or have any
   21  * questions.
   22  *
   23  */
   24 
   25 #include "asm/assembler.hpp"
   26 #include "asm/assembler.inline.hpp"
   27 #include "code/aotCodeCache.hpp"
   28 #include "code/compiledIC.hpp"
   29 #include "compiler/compiler_globals.hpp"
   30 #include "compiler/disassembler.hpp"
   31 #include "crc32c.h"
   32 #include "gc/shared/barrierSet.hpp"
   33 #include "gc/shared/barrierSetAssembler.hpp"
   34 #include "gc/shared/collectedHeap.inline.hpp"
   35 #include "gc/shared/tlab_globals.hpp"
   36 #include "interpreter/bytecodeHistogram.hpp"
   37 #include "interpreter/interpreter.hpp"
   38 #include "interpreter/interpreterRuntime.hpp"
   39 #include "jvm.h"
   40 #include "memory/resourceArea.hpp"
   41 #include "memory/universe.hpp"
   42 #include "oops/accessDecorators.hpp"
   43 #include "oops/compressedKlass.inline.hpp"
   44 #include "oops/compressedOops.inline.hpp"
   45 #include "oops/klass.inline.hpp"
   46 #include "prims/methodHandles.hpp"
   47 #include "runtime/continuation.hpp"
   48 #include "runtime/interfaceSupport.inline.hpp"
   49 #include "runtime/javaThread.hpp"
   50 #include "runtime/jniHandles.hpp"
   51 #include "runtime/objectMonitor.hpp"
   52 #include "runtime/os.hpp"
   53 #include "runtime/safepoint.hpp"
   54 #include "runtime/safepointMechanism.hpp"
   55 #include "runtime/sharedRuntime.hpp"
   56 #include "runtime/stubRoutines.hpp"
   57 #include "utilities/checkedCast.hpp"
   58 #include "utilities/macros.hpp"
   59 
   60 #ifdef PRODUCT
   61 #define BLOCK_COMMENT(str) /* nothing */
   62 #define STOP(error) stop(error)
   63 #else
   64 #define BLOCK_COMMENT(str) block_comment(str)
   65 #define STOP(error) block_comment(error); stop(error)
   66 #endif
   67 
   68 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
   69 
   70 #ifdef ASSERT
   71 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
   72 #endif
   73 
   74 static const Assembler::Condition reverse[] = {
   75     Assembler::noOverflow     /* overflow      = 0x0 */ ,
   76     Assembler::overflow       /* noOverflow    = 0x1 */ ,
   77     Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
   78     Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,

 1286 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1287   assert_different_registers(arg_0, c_rarg1, c_rarg2);
 1288   assert_different_registers(arg_1, c_rarg2);
 1289   pass_arg2(this, arg_2);
 1290   pass_arg1(this, arg_1);
 1291   pass_arg0(this, arg_0);
 1292   call_VM_leaf(entry_point, 3);
 1293 }
 1294 
 1295 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
 1296   assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
 1297   assert_different_registers(arg_1, c_rarg2, c_rarg3);
 1298   assert_different_registers(arg_2, c_rarg3);
 1299   pass_arg3(this, arg_3);
 1300   pass_arg2(this, arg_2);
 1301   pass_arg1(this, arg_1);
 1302   pass_arg0(this, arg_0);
 1303   call_VM_leaf(entry_point, 3);
 1304 }
 1305 
 1306 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
 1307   pass_arg0(this, arg_0);
 1308   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1309 }
 1310 
 1311 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
 1312   assert_different_registers(arg_0, c_rarg1);
 1313   pass_arg1(this, arg_1);
 1314   pass_arg0(this, arg_0);
 1315   MacroAssembler::call_VM_leaf_base(entry_point, 2);
 1316 }
 1317 
 1318 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1319   assert_different_registers(arg_0, c_rarg1, c_rarg2);
 1320   assert_different_registers(arg_1, c_rarg2);
 1321   pass_arg2(this, arg_2);
 1322   pass_arg1(this, arg_1);
 1323   pass_arg0(this, arg_0);
 1324   MacroAssembler::call_VM_leaf_base(entry_point, 3);
 1325 }
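// Note on the ordering above: pass_argN moves a value into its
// calling-convention register, and the arguments are passed highest-first
// (c_rarg3, then c_rarg2, c_rarg1, c_rarg0). Each assert_different_registers
// call checks that a source register is not one of the c_rarg registers that
// are overwritten before that source is consumed, so the shuffle can never
// clobber a value it still needs.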

 2339     lea(rscratch, src);
 2340     Assembler::mulss(dst, Address(rscratch, 0));
 2341   }
 2342 }
 2343 
 2344 void MacroAssembler::null_check(Register reg, int offset) {
 2345   if (needs_explicit_null_check(offset)) {
 2346     // provoke OS null exception if reg is null by
 2347     // accessing M[reg] w/o changing any (non-CC) registers
 2348     // NOTE: cmpl is plenty here to provoke a segv
 2349     cmpptr(rax, Address(reg, 0));
 2350     // Note: should probably use testl(rax, Address(reg, 0));
 2351     //       may be shorter code (however, this version of
 2352     //       testl needs to be implemented first)
 2353   } else {
 2354     // nothing to do, (later) access of M[reg + offset]
 2355     // will provoke OS null exception if reg is null
 2356   }
 2357 }
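// The offset test matters because an access at [reg + offset] with a null
// reg only faults if the resulting address still lands in the protected low
// pages. A simplified model of the policy (a sketch only; the real
// needs_explicit_null_check lives in shared assembler code and handles
// further OS- and GC-specific cases):
//
//   bool needs_explicit_null_check_sketch(intptr_t offset) {
//     // small non-negative offsets fault on their own; large ones might not
//     return offset < 0 || offset >= os::vm_page_size();
//   }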
 2358 
 2359 void MacroAssembler::os_breakpoint() {
 2360   // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
 2361   // (e.g., MSVC can't call ps() otherwise)
 2362   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 2363 }
 2364 
 2365 void MacroAssembler::unimplemented(const char* what) {
 2366   const char* buf = nullptr;
 2367   {
 2368     ResourceMark rm;
 2369     stringStream ss;
 2370     ss.print("unimplemented: %s", what);
 2371     buf = code_string(ss.as_string());
 2372   }
 2373   stop(buf);
 2374 }
 2375 
 2376 #define XSTATE_BV 0x200
 2377 
 2378 void MacroAssembler::pop_CPU_state() {

 3421 }
 3422 
 3423 // C++ bool manipulation
 3424 void MacroAssembler::testbool(Register dst) {
 3425   if (sizeof(bool) == 1)
 3426     testb(dst, 0xff);
 3427   else if (sizeof(bool) == 2) {
 3428     // testw implementation needed for two byte bools
 3429     ShouldNotReachHere();
 3430   } else if (sizeof(bool) == 4)
 3431     testl(dst, dst);
 3432   else
 3433     // unsupported
 3434     ShouldNotReachHere();
 3435 }
 3436 
 3437 void MacroAssembler::testptr(Register dst, Register src) {
 3438   testq(dst, src);
 3439 }
 3440 
 3441 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
 3442 void MacroAssembler::tlab_allocate(Register obj,
 3443                                    Register var_size_in_bytes,
 3444                                    int con_size_in_bytes,
 3445                                    Register t1,
 3446                                    Register t2,
 3447                                    Label& slow_case) {
 3448   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 3449   bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
 3450 }
 3451 
 3452 RegSet MacroAssembler::call_clobbered_gp_registers() {
 3453   RegSet regs;
 3454   regs += RegSet::of(rax, rcx, rdx);
 3455 #ifndef _WINDOWS
 3456   regs += RegSet::of(rsi, rdi);
 3457 #endif
 3458   regs += RegSet::range(r8, r11);
 3459   if (UseAPX) {
 3460     regs += RegSet::range(r16, as_Register(Register::number_of_registers - 1));

 3624   xorptr(temp, temp);    // use _zero reg to clear memory (shorter code)
 3625   if (UseIncDec) {
 3626     shrptr(index, 3);  // divide by 8/16 and set carry flag if bit 2 was set
 3627   } else {
 3628     shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
 3629     shrptr(index, 1);
 3630   }
 3631 
 3632   // initialize remaining object fields: index is a multiple of 2 now
 3633   {
 3634     Label loop;
 3635     bind(loop);
 3636     movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
 3637     decrement(index);
 3638     jcc(Assembler::notZero, loop);
 3639   }
 3640 
 3641   bind(done);
 3642 }
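// Note on the shift idiom above: SHR leaves the last bit shifted out in CF,
// so shrptr both scales the count and lets the caller branch on an "odd"
// leftover with jcc(Assembler::carrySet, ...) without a separate test
// instruction. The two-step shift in the !UseIncDec path trades an extra
// instruction for avoiding a partial-flags stall on older microarchitectures.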
 3643 
 3644 // Look up the method for a megamorphic invokeinterface call.
 3645 // The target method is determined by <intf_klass, itable_index>.
 3646 // The receiver klass is in recv_klass.
 3647 // On success, the result will be in method_result, and execution falls through.
 3648 // On failure, execution transfers to the given label.
 3649 void MacroAssembler::lookup_interface_method(Register recv_klass,
 3650                                              Register intf_klass,
 3651                                              RegisterOrConstant itable_index,
 3652                                              Register method_result,
 3653                                              Register scan_temp,
 3654                                              Label& L_no_such_interface,
 3655                                              bool return_method) {
 3656   assert_different_registers(recv_klass, intf_klass, scan_temp);
 3657   assert_different_registers(method_result, intf_klass, scan_temp);
 3658   assert(recv_klass != method_result || !return_method,
 3659          "recv_klass can be destroyed when method isn't needed");
 3660 
 3661   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
 3662          "caller must use same register for non-constant itable index as for method");
 3663 

 4674   } else {
 4675     Label L;
 4676     jccb(negate_condition(cc), L);
 4677     movl(dst, src);
 4678     bind(L);
 4679   }
 4680 }
 4681 
 4682 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
 4683   if (VM_Version::supports_cmov()) {
 4684     cmovl(cc, dst, src);
 4685   } else {
 4686     Label L;
 4687     jccb(negate_condition(cc), L);
 4688     movl(dst, src);
 4689     bind(L);
 4690   }
 4691 }
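// Both cmov32 paths compute dst = cc ? src : dst. Without CMOV support the
// same effect comes from branching around the move; negate_condition(cc)
// selects the jump that skips it:
//
//   cmovl(cc, dst, src);                                   // single instruction
//   jccb(negate_condition(cc), L); movl(dst, src); bind(L); // branch + move fallback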
 4692 
 4693 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
 4694   if (!VerifyOops) return;
 4695 
 4696   BLOCK_COMMENT("verify_oop {");
 4697   push(rscratch1);
 4698   push(rax);                          // save rax
 4699   push(reg);                          // pass register argument
 4700 
 4701   // Pass register number to verify_oop_subroutine
 4702   const char* b = nullptr;
 4703   {
 4704     ResourceMark rm;
 4705     stringStream ss;
 4706     ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
 4707     b = code_string(ss.as_string());
 4708   }
 4709   AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
 4710   pushptr(buffer.addr(), rscratch1);
 4711 
 4712   // call indirectly to solve generation ordering problem
 4713   movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
 4714   call(rax);

 4733   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
 4734   int stackElementSize = Interpreter::stackElementSize;
 4735   int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
 4736 #ifdef ASSERT
 4737   int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
 4738   assert(offset1 - offset == stackElementSize, "correct arithmetic");
 4739 #endif
 4740   Register             scale_reg    = noreg;
 4741   Address::ScaleFactor scale_factor = Address::no_scale;
 4742   if (arg_slot.is_constant()) {
 4743     offset += arg_slot.as_constant() * stackElementSize;
 4744   } else {
 4745     scale_reg    = arg_slot.as_register();
 4746     scale_factor = Address::times(stackElementSize);
 4747   }
 4748   offset += wordSize;           // return PC is on stack
 4749   return Address(rsp, scale_reg, scale_factor, offset);
 4750 }
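// A worked example (hypothetical values): with stackElementSize == 8,
// wordSize == 8, extra_slot_offset == 0 and a constant arg_slot of 2, the
// returned address is rsp + expr_offset_in_bytes(0) + 2*8 + 8; the extra
// wordSize skips the return PC sitting on top of the stack. For a register
// arg_slot the same sum folds into the scaled-index addressing mode instead.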
 4751 
 4752 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
 4753   if (!VerifyOops) return;
 4754 
 4755   push(rscratch1);
 4756   push(rax); // save rax,
 4757   // addr may contain rsp so we will have to adjust it based on the push
 4758   // we just did (and on 64 bit we do two pushes)
 4759   // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which
 4760   // stores rax into addr which is backwards of what was intended.
 4761   if (addr.uses(rsp)) {
 4762     lea(rax, addr);
 4763     pushptr(Address(rax, 2 * BytesPerWord));
 4764   } else {
 4765     pushptr(addr);
 4766   }
 4767 
 4768   // Pass register number to verify_oop_subroutine
 4769   const char* b = nullptr;
 4770   {
 4771     ResourceMark rm;
 4772     stringStream ss;
 4773     ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);

 5127 
 5128 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
 5129   // get mirror
 5130   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 5131   load_method_holder(mirror, method);
 5132   movptr(mirror, Address(mirror, mirror_offset));
 5133   resolve_oop_handle(mirror, tmp);
 5134 }
 5135 
 5136 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
 5137   load_method_holder(rresult, rmethod);
 5138   movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
 5139 }
 5140 
 5141 void MacroAssembler::load_method_holder(Register holder, Register method) {
 5142   movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
 5143   movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
 5144   movptr(holder, Address(holder, ConstantPool::pool_holder_offset()));          // InstanceKlass*
 5145 }
 5146 
 5147 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
 5148   assert(UseCompactObjectHeaders, "expect compact object headers");
 5149   movq(dst, Address(src, oopDesc::mark_offset_in_bytes()));
 5150   shrq(dst, markWord::klass_shift);
 5151 }
 5152 
 5153 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
 5154   assert_different_registers(src, tmp);
 5155   assert_different_registers(dst, tmp);
 5156 
 5157   if (UseCompactObjectHeaders) {
 5158     load_narrow_klass_compact(dst, src);
 5159     decode_klass_not_null(dst, tmp);
 5160   } else if (UseCompressedClassPointers) {
 5161     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5162     decode_klass_not_null(dst, tmp);
 5163   } else {
 5164     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5165   }
 5166 }
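// With compact object headers the narrow Klass* lives in the upper bits of
// the mark word, so no separate klass field is loaded. A sketch of the
// extraction performed by load_narrow_klass_compact:
//
//   uint64_t mark = obj->mark().value();
//   narrowKlass nk = (narrowKlass)(mark >> markWord::klass_shift);
//   // then decoded like any other compressed class pointer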
 5167 
 5168 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
 5169   assert(!UseCompactObjectHeaders, "not with compact headers");
 5170   assert_different_registers(src, tmp);
 5171   assert_different_registers(dst, tmp);
 5172   if (UseCompressedClassPointers) {
 5173     encode_klass_not_null(src, tmp);
 5174     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5175   } else {
 5176     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5177   }
 5178 }
 5179 
 5180 void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
 5181   if (UseCompactObjectHeaders) {
 5182     assert(tmp != noreg, "need tmp");
 5183     assert_different_registers(klass, obj, tmp);
 5184     load_narrow_klass_compact(tmp, obj);
 5185     cmpl(klass, tmp);
 5186   } else if (UseCompressedClassPointers) {
 5187     cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes()));

 5213   bool as_raw = (decorators & AS_RAW) != 0;
 5214   if (as_raw) {
 5215     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1);
 5216   } else {
 5217     bs->load_at(this, decorators, type, dst, src, tmp1);
 5218   }
 5219 }
 5220 
 5221 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
 5222                                      Register tmp1, Register tmp2, Register tmp3) {
 5223   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5224   decorators = AccessInternal::decorator_fixup(decorators, type);
 5225   bool as_raw = (decorators & AS_RAW) != 0;
 5226   if (as_raw) {
 5227     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5228   } else {
 5229     bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5230   }
 5231 }
 5232 
 5233 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
 5234   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1);
 5235 }
 5236 
 5237 // Doesn't do verification, generates fixed size code
 5238 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
 5239   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1);
 5240 }
 5241 
 5242 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
 5243                                     Register tmp2, Register tmp3, DecoratorSet decorators) {
 5244   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
 5245 }
 5246 
 5247 // Used for storing nulls.
 5248 void MacroAssembler::store_heap_oop_null(Address dst) {
 5249   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
 5250 }
 5251 
 5252 void MacroAssembler::store_klass_gap(Register dst, Register src) {

 5569   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5570   int klass_index = oop_recorder()->find_index(k);
 5571   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
 5572   Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 5573 }
 5574 
 5575 void MacroAssembler::reinit_heapbase() {
 5576   if (UseCompressedOops) {
 5577     if (Universe::heap() != nullptr) {
 5578       if (CompressedOops::base() == nullptr) {
 5579         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
 5580       } else {
 5581         mov64(r12_heapbase, (int64_t)CompressedOops::base());
 5582       }
 5583     } else {
 5584       movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr()));
 5585     }
 5586   }
 5587 }
 5588 
 5589 #if COMPILER2_OR_JVMCI
 5590 
 5591 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers
 5592 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 5593   // cnt - number of qwords (8-byte words).
 5594   // base - start address, qword aligned.
 5595   Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
 5596   bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
 5597   if (use64byteVector) {
 5598     vpxor(xtmp, xtmp, xtmp, AVX_512bit);
 5599   } else if (MaxVectorSize >= 32) {
 5600     vpxor(xtmp, xtmp, xtmp, AVX_256bit);
 5601   } else {
 5602     pxor(xtmp, xtmp);
 5603   }
 5604   jmp(L_zero_64_bytes);
 5605 
 5606   BIND(L_loop);
 5607   if (MaxVectorSize >= 32) {
 5608     fill64(base, 0, xtmp, use64byteVector);
 5609   } else {
 5610     movdqu(Address(base,  0), xtmp);
 5611     movdqu(Address(base, 16), xtmp);
 5612     movdqu(Address(base, 32), xtmp);
 5613     movdqu(Address(base, 48), xtmp);
 5614   }
 5615   addptr(base, 64);
 5616 
 5617   BIND(L_zero_64_bytes);
 5618   subptr(cnt, 8);
 5619   jccb(Assembler::greaterEqual, L_loop);
 5620 
 5621   // Copy trailing 64 bytes
 5622   if (use64byteVector) {
 5623     addptr(cnt, 8);
 5624     jccb(Assembler::equal, L_end);
 5625     fill64_masked(3, base, 0, xtmp, mask, cnt, rtmp, true);
 5626     jmp(L_end);
 5627   } else {
 5628     addptr(cnt, 4);
 5629     jccb(Assembler::less, L_tail);
 5630     if (MaxVectorSize >= 32) {
 5631       vmovdqu(Address(base, 0), xtmp);
 5632     } else {
 5633       movdqu(Address(base,  0), xtmp);
 5634       movdqu(Address(base, 16), xtmp);
 5635     }
 5636   }
 5637   addptr(base, 32);
 5638   subptr(cnt, 4);
 5639 
 5640   BIND(L_tail);
 5641   addptr(cnt, 4);
 5642   jccb(Assembler::lessEqual, L_end);
 5643   if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
 5644     fill32_masked(3, base, 0, xtmp, mask, cnt, rtmp);
 5645   } else {
 5646     decrement(cnt);
 5647 
 5648     BIND(L_sloop);
 5649     movq(Address(base, 0), xtmp);
 5650     addptr(base, 8);
 5651     decrement(cnt);
 5652     jccb(Assembler::greaterEqual, L_sloop);
 5653   }
 5654   BIND(L_end);
 5655 }
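// The control flow above, as scalar pseudocode (cnt in qwords, sketch only):
//
//   while (cnt >= 8) { store 64 zero bytes; base += 64; cnt -= 8; }   // L_loop
//   if (!use64byteVector && cnt >= 4) { store 32 bytes; base += 32; cnt -= 4; }
//   while (cnt > 0)  { store 8 bytes;  base += 8;  cnt -= 1; }        // L_sloop
//
// When AVX-512VL is available the scalar tail is replaced by a single
// predicated store (fill64_masked / fill32_masked) driven by a k-mask
// computed from the remaining count.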
 5656 
 5657 // Clearing constant sized memory using YMM/ZMM registers.
 5658 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 5659   assert(UseAVX > 2 && VM_Version::supports_avx512vl(), "");
 5660   bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
 5661 
 5662   int vector64_count = (cnt & (~0x7)) >> 3;
 5663   cnt = cnt & 0x7;
 5664   const int fill64_per_loop = 4;

 5726         break;
 5727       case 7:
 5728         if (use64byteVector) {
 5729           movl(rtmp, 0x7F);
 5730           kmovwl(mask, rtmp);
 5731           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 5732         } else {
 5733           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 5734           movl(rtmp, 0x7);
 5735           kmovwl(mask, rtmp);
 5736           evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
 5737         }
 5738         break;
 5739       default:
 5740         fatal("Unexpected length: %d\n", cnt);
 5741         break;
 5742     }
 5743   }
 5744 }
 5745 
 5746 void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp, XMMRegister xtmp,
 5747                                bool is_large, KRegister mask) {
 5748   // cnt      - number of qwords (8-byte words).
 5749   // base     - start address, qword aligned.
 5750   // is_large - if optimizers know cnt is larger than InitArrayShortSize
 5751   assert(base==rdi, "base register must be rdi for rep stos");
 5752   assert(tmp==rax,   "tmp register must be rax for rep stos");
 5753   assert(cnt==rcx,   "cnt register must be rcx for rep stos");
 5754   assert(InitArrayShortSize % BytesPerLong == 0,
 5755     "InitArrayShortSize should be the multiple of BytesPerLong");
 5756 
 5757   Label DONE;
 5758   if (!is_large || !UseXMMForObjInit) {
 5759     xorptr(tmp, tmp);
 5760   }
 5761 
 5762   if (!is_large) {
 5763     Label LOOP, LONG;
 5764     cmpptr(cnt, InitArrayShortSize/BytesPerLong);
 5765     jccb(Assembler::greater, LONG);
 5766 
 5767     decrement(cnt);
 5768     jccb(Assembler::negative, DONE); // Zero length
 5769 
 5770     // Use individual pointer-sized stores for small counts:
 5771     BIND(LOOP);
 5772     movptr(Address(base, cnt, Address::times_ptr), tmp);
 5773     decrement(cnt);
 5774     jccb(Assembler::greaterEqual, LOOP);
 5775     jmpb(DONE);
 5776 
 5777     BIND(LONG);
 5778   }
 5779 
 5780   // Use longer rep-prefixed ops for non-small counts:
 5781   if (UseFastStosb) {
 5782     shlptr(cnt, 3); // convert to number of bytes
 5783     rep_stosb();
 5784   } else if (UseXMMForObjInit) {
 5785     xmm_clear_mem(base, cnt, tmp, xtmp, mask);
 5786   } else {
 5787     rep_stos();
 5788   }
 5789 
 5790   BIND(DONE);
 5791 }
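// The three large-count strategies trade setup cost against throughput:
// rep stosb needs a byte count, hence shlptr(cnt, 3) to convert qwords to
// bytes; rep stos consumes the qword count in rcx directly; and the XMM path
// avoids rep-string startup latency where fast-string support is absent.
// All of them rely on the rax/rcx/rdi pinning asserted at function entry.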
 5792 
 5793 #endif //COMPILER2_OR_JVMCI
 5794 
 5795 
 5796 void MacroAssembler::generate_fill(BasicType t, bool aligned,
 5797                                    Register to, Register value, Register count,
 5798                                    Register rtmp, XMMRegister xtmp) {
 5799   ShortBranchVerifier sbv(this);
 5800   assert_different_registers(to, value, count, rtmp);
 5801   Label L_exit;
 5802   Label L_fill_2_bytes, L_fill_4_bytes;
 5803 
 5804 #if defined(COMPILER2)
 5805   if (MaxVectorSize >= 32 &&

 9685 
 9686   // Load top.
 9687   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
 9688 
 9689   // Check if the lock-stack is full.
 9690   cmpl(top, LockStack::end_offset());
 9691   jcc(Assembler::greaterEqual, slow);
 9692 
 9693   // Check for recursion.
 9694   cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
 9695   jcc(Assembler::equal, push);
 9696 
 9697   // Check header for monitor (0b10).
 9698   testptr(reg_rax, markWord::monitor_value);
 9699   jcc(Assembler::notZero, slow);
 9700 
 9701   // Try to lock. Transition lock bits 0b01 => 0b00
 9702   movptr(tmp, reg_rax);
 9703   andptr(tmp, ~(int32_t)markWord::unlocked_value);
 9704   orptr(reg_rax, markWord::unlocked_value);
 9705   lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
 9706   jcc(Assembler::notEqual, slow);
 9707 
 9708   // Restore top, CAS clobbers register.
 9709   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
 9710 
 9711   bind(push);
 9712   // After successful lock, push object on lock-stack.
 9713   movptr(Address(thread, top), obj);
 9714   incrementl(top, oopSize);
 9715   movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
 9716 }
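// The CAS above in a nutshell: the low two mark-word bits encode the lock
// state (0b01 unlocked, 0b00 fast-locked, 0b10 monitor). rax holds the
// expected value mark|0b01 and tmp the desired value mark&~0b01, so
//
//   lock cmpxchg [obj.mark], tmp   // succeeds only if still unlocked
//
// fails, and control takes the slow path, whenever another thread changed
// the mark word in between.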
 9717 
 9718 // Implements fast-unlocking.
 9719 //
 9720 // obj: the object to be unlocked
 9721 // reg_rax: rax
 9722 // thread: the thread
 9723 // tmp: a temporary register
 9724 void MacroAssembler::fast_unlock(Register obj, Register reg_rax, Register tmp, Label& slow) {

    1 /*
    2  * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
    3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    4  *
    5  * This code is free software; you can redistribute it and/or modify it
    6  * under the terms of the GNU General Public License version 2 only, as
    7  * published by the Free Software Foundation.
    8  *
    9  * This code is distributed in the hope that it will be useful, but WITHOUT
   10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   12  * version 2 for more details (a copy is included in the LICENSE file that
   13  * accompanied this code).
   14  *
   15  * You should have received a copy of the GNU General Public License version
   16  * 2 along with this work; if not, write to the Free Software Foundation,
   17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   18  *
   19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   20  * or visit www.oracle.com if you need additional information or have any
   21  * questions.
   22  *
   23  */
   24 
   25 #include "asm/assembler.hpp"
   26 #include "asm/assembler.inline.hpp"
   27 #include "ci/ciInlineKlass.hpp"
   28 #include "code/aotCodeCache.hpp"
   29 #include "code/compiledIC.hpp"
   30 #include "compiler/compiler_globals.hpp"
   31 #include "compiler/disassembler.hpp"
   32 #include "crc32c.h"
   33 #include "gc/shared/barrierSet.hpp"
   34 #include "gc/shared/barrierSetAssembler.hpp"
   35 #include "gc/shared/collectedHeap.inline.hpp"
   36 #include "gc/shared/tlab_globals.hpp"
   37 #include "interpreter/bytecodeHistogram.hpp"
   38 #include "interpreter/interpreter.hpp"
   39 #include "interpreter/interpreterRuntime.hpp"
   40 #include "jvm.h"
   41 #include "memory/resourceArea.hpp"
   42 #include "memory/universe.hpp"
   43 #include "oops/accessDecorators.hpp"
   44 #include "oops/compressedKlass.inline.hpp"
   45 #include "oops/compressedOops.inline.hpp"
   46 #include "oops/klass.inline.hpp"
   47 #include "oops/resolvedFieldEntry.hpp"
   48 #include "prims/methodHandles.hpp"
   49 #include "runtime/arguments.hpp"
   50 #include "runtime/continuation.hpp"
   51 #include "runtime/interfaceSupport.inline.hpp"
   52 #include "runtime/javaThread.hpp"
   53 #include "runtime/jniHandles.hpp"
   54 #include "runtime/objectMonitor.hpp"
   55 #include "runtime/os.hpp"
   56 #include "runtime/safepoint.hpp"
   57 #include "runtime/safepointMechanism.hpp"
   58 #include "runtime/sharedRuntime.hpp"
   59 #include "runtime/signature_cc.hpp"
   60 #include "runtime/stubRoutines.hpp"
   61 #include "utilities/checkedCast.hpp"
   62 #include "utilities/macros.hpp"
   63 #include "vmreg_x86.inline.hpp"
   64 #ifdef COMPILER2
   65 #include "opto/output.hpp"
   66 #endif
   67 
   68 #ifdef PRODUCT
   69 #define BLOCK_COMMENT(str) /* nothing */
   70 #define STOP(error) stop(error)
   71 #else
   72 #define BLOCK_COMMENT(str) block_comment(str)
   73 #define STOP(error) block_comment(error); stop(error)
   74 #endif
   75 
   76 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
   77 
   78 #ifdef ASSERT
   79 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
   80 #endif
   81 
   82 static const Assembler::Condition reverse[] = {
   83     Assembler::noOverflow     /* overflow      = 0x0 */ ,
   84     Assembler::overflow       /* noOverflow    = 0x1 */ ,
   85     Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
   86     Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,

 1294 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1295   assert_different_registers(arg_0, c_rarg1, c_rarg2);
 1296   assert_different_registers(arg_1, c_rarg2);
 1297   pass_arg2(this, arg_2);
 1298   pass_arg1(this, arg_1);
 1299   pass_arg0(this, arg_0);
 1300   call_VM_leaf(entry_point, 3);
 1301 }
 1302 
 1303 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
 1304   assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
 1305   assert_different_registers(arg_1, c_rarg2, c_rarg3);
 1306   assert_different_registers(arg_2, c_rarg3);
 1307   pass_arg3(this, arg_3);
 1308   pass_arg2(this, arg_2);
 1309   pass_arg1(this, arg_1);
 1310   pass_arg0(this, arg_0);
 1311   call_VM_leaf(entry_point, 3);
 1312 }
 1313 
 1314 void MacroAssembler::super_call_VM_leaf(address entry_point) {
 1315   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1316 }
 1317 
 1318 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
 1319   pass_arg0(this, arg_0);
 1320   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1321 }
 1322 
 1323 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
 1324   assert_different_registers(arg_0, c_rarg1);
 1325   pass_arg1(this, arg_1);
 1326   pass_arg0(this, arg_0);
 1327   MacroAssembler::call_VM_leaf_base(entry_point, 2);
 1328 }
 1329 
 1330 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1331   assert_different_registers(arg_0, c_rarg1, c_rarg2);
 1332   assert_different_registers(arg_1, c_rarg2);
 1333   pass_arg2(this, arg_2);
 1334   pass_arg1(this, arg_1);
 1335   pass_arg0(this, arg_0);
 1336   MacroAssembler::call_VM_leaf_base(entry_point, 3);
 1337 }

 2351     lea(rscratch, src);
 2352     Assembler::mulss(dst, Address(rscratch, 0));
 2353   }
 2354 }
 2355 
 2356 void MacroAssembler::null_check(Register reg, int offset) {
 2357   if (needs_explicit_null_check(offset)) {
 2358     // provoke OS null exception if reg is null by
 2359     // accessing M[reg] w/o changing any (non-CC) registers
 2360     // NOTE: cmpl is plenty here to provoke a segv
 2361     cmpptr(rax, Address(reg, 0));
 2362     // Note: should probably use testl(rax, Address(reg, 0));
 2363     //       may be shorter code (however, this version of
 2364     //       testl needs to be implemented first)
 2365   } else {
 2366     // nothing to do, (later) access of M[reg + offset]
 2367     // will provoke OS null exception if reg is null
 2368   }
 2369 }
 2370 
 2371 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
 2372   andptr(markword, markWord::inline_type_mask_in_place);
 2373   cmpptr(markword, markWord::inline_type_pattern);
 2374   jcc(Assembler::equal, is_inline_type);
 2375 }
 2376 
 2377 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type, bool can_be_null) {
 2378   if (can_be_null) {
 2379     testptr(object, object);
 2380     jcc(Assembler::zero, not_inline_type);
 2381   }
 2382   const int is_inline_type_mask = markWord::inline_type_pattern;
 2383   movptr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
 2384   andptr(tmp, is_inline_type_mask);
 2385   cmpptr(tmp, is_inline_type_mask);
 2386   jcc(Assembler::notEqual, not_inline_type);
 2387 }
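// Both tests above key off the mark word alone: inline_type_pattern is a
// multi-bit pattern inside inline_type_mask_in_place, so the check is
// "mask, then compare", not a single-bit test. As a predicate:
//
//   bool is_inline_type(uintptr_t mark) {
//     return (mark & markWord::inline_type_mask_in_place)
//            == markWord::inline_type_pattern;
//   }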
 2388 
 2389 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) {
 2390   movl(temp_reg, flags);
 2391   testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
 2392   jcc(Assembler::notEqual, is_null_free_inline_type);
 2393 }
 2394 
 2395 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) {
 2396   movl(temp_reg, flags);
 2397   testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
 2398   jcc(Assembler::equal, not_null_free_inline_type);
 2399 }
 2400 
 2401 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) {
 2402   movl(temp_reg, flags);
 2403   testl(temp_reg, 1 << ResolvedFieldEntry::is_flat_shift);
 2404   jcc(Assembler::notEqual, is_flat);
 2405 }
 2406 
 2407 void MacroAssembler::test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker) {
 2408   movl(temp_reg, flags);
 2409   testl(temp_reg, 1 << ResolvedFieldEntry::has_null_marker_shift);
 2410   jcc(Assembler::notEqual, has_null_marker);
 2411 }
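// The four field-property tests above share one shape: copy the
// ResolvedFieldEntry flags into a temp, test a single bit (the
// is_null_free_inline_type / is_flat / has_null_marker shift constants),
// and branch on notEqual (bit set) or equal (bit clear). Only the bit index
// and the branch sense differ between them.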
 2412 
 2413 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
 2414   Label test_mark_word;
 2415   // load mark word
 2416   movptr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
 2417   // check displaced
 2418   testl(temp_reg, markWord::unlocked_value);
 2419   jccb(Assembler::notZero, test_mark_word);
 2420   // slow path use klass prototype
 2421   push(rscratch1);
 2422   load_prototype_header(temp_reg, oop, rscratch1);
 2423   pop(rscratch1);
 2424 
 2425   bind(test_mark_word);
 2426   testl(temp_reg, test_bit);
 2427   jcc((jmp_set) ? Assembler::notZero : Assembler::zero, jmp_label);
 2428 }
 2429 
 2430 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg,
 2431                                          Label& is_flat_array) {
 2432 #ifdef _LP64
 2433   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array);
 2434 #else
 2435   load_klass(temp_reg, oop, noreg);
 2436   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 2437   test_flat_array_layout(temp_reg, is_flat_array);
 2438 #endif
 2439 }
 2440 
 2441 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg,
 2442                                              Label& is_non_flat_array) {
 2443 #ifdef _LP64
 2444   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array);
 2445 #else
 2446   load_klass(temp_reg, oop, noreg);
 2447   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 2448   test_non_flat_array_layout(temp_reg, is_non_flat_array);
 2449 #endif
 2450 }
 2451 
 2452 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label&is_null_free_array) {
 2453 #ifdef _LP64
 2454   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
 2455 #else
 2456   Unimplemented();
 2457 #endif
 2458 }
 2459 
 2460 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label&is_non_null_free_array) {
 2461 #ifdef _LP64
 2462   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
 2463 #else
 2464   Unimplemented();
 2465 #endif
 2466 }
 2467 
 2468 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) {
 2469   testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
 2470   jcc(Assembler::notZero, is_flat_array);
 2471 }
 2472 
 2473 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) {
 2474   testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
 2475   jcc(Assembler::zero, is_non_flat_array);
 2476 }
 2477 
 2478 void MacroAssembler::os_breakpoint() {
 2479   // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
 2480   // (e.g., MSVC can't call ps() otherwise)
 2481   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 2482 }
 2483 
 2484 void MacroAssembler::unimplemented(const char* what) {
 2485   const char* buf = nullptr;
 2486   {
 2487     ResourceMark rm;
 2488     stringStream ss;
 2489     ss.print("unimplemented: %s", what);
 2490     buf = code_string(ss.as_string());
 2491   }
 2492   stop(buf);
 2493 }
 2494 
 2495 #define XSTATE_BV 0x200
 2496 
 2497 void MacroAssembler::pop_CPU_state() {

 3540 }
 3541 
 3542 // C++ bool manipulation
 3543 void MacroAssembler::testbool(Register dst) {
 3544   if (sizeof(bool) == 1)
 3545     testb(dst, 0xff);
 3546   else if (sizeof(bool) == 2) {
 3547     // testw implementation needed for two byte bools
 3548     ShouldNotReachHere();
 3549   } else if (sizeof(bool) == 4)
 3550     testl(dst, dst);
 3551   else
 3552     // unsupported
 3553     ShouldNotReachHere();
 3554 }
 3555 
 3556 void MacroAssembler::testptr(Register dst, Register src) {
 3557   testq(dst, src);
 3558 }
 3559 
 3560 // Object / value buffer allocation...
 3561 //
 3562 // Kills klass and rsi on LP64
 3563 void MacroAssembler::allocate_instance(Register klass, Register new_obj,
 3564                                        Register t1, Register t2,
 3565                                        bool clear_fields, Label& alloc_failed)
 3566 {
 3567   Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop;
 3568   Register layout_size = t1;
 3569   assert(new_obj == rax, "needs to be rax");
 3570   assert_different_registers(klass, new_obj, t1, t2);
 3571 
 3572   // get instance_size in InstanceKlass (scaled to a count of bytes)
 3573   movl(layout_size, Address(klass, Klass::layout_helper_offset()));
 3574   // test to see if it is malformed in some way
 3575   testl(layout_size, Klass::_lh_instance_slow_path_bit);
 3576   jcc(Assembler::notZero, slow_case_no_pop);
 3577 
 3578   // Allocate the instance:
 3579   //  If TLAB is enabled:
 3580   //    Try to allocate in the TLAB.
 3581   //    If fails, go to the slow path.
 3582   //  Else If inline contiguous allocations are enabled:
 3583   //    Try to allocate in eden.
 3584   //    If fails due to heap end, go to slow path.
 3585   //
 3586   //  If TLAB is enabled OR inline contiguous is enabled:
 3587   //    Initialize the allocation.
 3588   //    Exit.
 3589   //
 3590   //  Go to slow path.
 3591 
 3592   push(klass);
 3593   if (UseTLAB) {
 3594     tlab_allocate(new_obj, layout_size, 0, klass, t2, slow_case);
 3595     if (ZeroTLAB || (!clear_fields)) {
 3596       // the fields have already been cleared
 3597       jmp(initialize_header);
 3598     } else {
 3599       // initialize both the header and fields
 3600       jmp(initialize_object);
 3601     }
 3602   } else {
 3603     jmp(slow_case);
 3604   }
 3605 
 3606   // If UseTLAB is true, the object was allocated above and still needs to be
 3607   // initialized. Otherwise, control has already jumped to the slow path.
 3608   if (UseTLAB) {
 3609     if (clear_fields) {
 3610       // The object body is initialized before the header. If the body size is
 3611       // zero, go directly to the header initialization.
 3612       bind(initialize_object);
 3613       if (UseCompactObjectHeaders) {
 3614         assert(is_aligned(oopDesc::base_offset_in_bytes(), BytesPerLong), "oop base offset must be 8-byte-aligned");
 3615         decrement(layout_size, oopDesc::base_offset_in_bytes());
 3616       } else {
 3617         decrement(layout_size, sizeof(oopDesc));
 3618       }
 3619       jcc(Assembler::zero, initialize_header);
 3620 
 3621       // Initialize topmost object field, divide size by 8, check if odd and
 3622       // test if zero.
 3623       Register zero = klass;
 3624       xorl(zero, zero);    // use zero reg to clear memory (shorter code)
 3625       shrl(layout_size, LogBytesPerLong); // divide by BytesPerLong and set carry flag if odd
 3626 
 3627   #ifdef ASSERT
 3628       // make sure instance_size was multiple of 8
 3629       Label L;
 3630       // Ignore partial flag stall after shrl() since it is debug VM
 3631       jcc(Assembler::carryClear, L);
 3632       stop("object size is not multiple of 2 - adjust this code");
 3633       bind(L);
 3634       // must be > 0, no extra check needed here
 3635   #endif
 3636 
 3637       // initialize remaining object fields: instance_size was a multiple of 8
 3638       {
 3639         Label loop;
 3640         bind(loop);
 3641         int header_size_bytes = oopDesc::header_size() * HeapWordSize;
 3642         assert(is_aligned(header_size_bytes, BytesPerLong), "oop header size must be 8-byte-aligned");
 3643         movptr(Address(new_obj, layout_size, Address::times_8, header_size_bytes - 1*oopSize), zero);
 3644         decrement(layout_size);
 3645         jcc(Assembler::notZero, loop);
 3646       }
 3647     } // clear_fields
 3648 
 3649     // initialize object header only.
 3650     bind(initialize_header);
 3651     if (UseCompactObjectHeaders || Arguments::is_valhalla_enabled()) {
 3652       pop(klass);
 3653       Register mark_word = t2;
 3654       movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
 3655       movptr(Address(new_obj, oopDesc::mark_offset_in_bytes()), mark_word);
 3656     } else {
 3657       movptr(Address(new_obj, oopDesc::mark_offset_in_bytes()),
 3658              (intptr_t)markWord::prototype().value()); // header
 3659       pop(klass);   // get saved klass back in the register.
 3660     }
 3661     if (!UseCompactObjectHeaders) {
 3662       xorl(rsi, rsi);                 // use zero reg to clear memory (shorter code)
 3663       store_klass_gap(new_obj, rsi);  // zero klass gap for compressed oops
 3664       movptr(t2, klass);         // preserve klass
 3665       store_klass(new_obj, t2, rscratch1);  // src klass reg is potentially compressed
 3666     }
 3667     jmp(done);
 3668   }
 3669 
 3670   bind(slow_case);
 3671   pop(klass);
 3672   bind(slow_case_no_pop);
 3673   jmp(alloc_failed);
 3674 
 3675   bind(done);
 3676 }
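// A hypothetical call site for allocate_instance (register choices follow
// the asserts above: new_obj must be rax; klass and rsi are clobbered):
//
//   Label alloc_failed, done;
//   allocate_instance(rbx /* klass */, rax /* new_obj */, rcx, rdx,
//                     /* clear_fields */ true, alloc_failed);
//   jmp(done);                  // rax holds the initialized instance
//   bind(alloc_failed);         // fall back to the runtime allocation path
//   ...
//   bind(done);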
 3677 
 3678 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
 3679 void MacroAssembler::tlab_allocate(Register obj,
 3680                                    Register var_size_in_bytes,
 3681                                    int con_size_in_bytes,
 3682                                    Register t1,
 3683                                    Register t2,
 3684                                    Label& slow_case) {
 3685   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 3686   bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
 3687 }
 3688 
 3689 RegSet MacroAssembler::call_clobbered_gp_registers() {
 3690   RegSet regs;
 3691   regs += RegSet::of(rax, rcx, rdx);
 3692 #ifndef _WINDOWS
 3693   regs += RegSet::of(rsi, rdi);
 3694 #endif
 3695   regs += RegSet::range(r8, r11);
 3696   if (UseAPX) {
 3697     regs += RegSet::range(r16, as_Register(Register::number_of_registers - 1));

 3861   xorptr(temp, temp);    // use _zero reg to clear memory (shorter code)
 3862   if (UseIncDec) {
 3863     shrptr(index, 3);  // divide by 8/16 and set carry flag if bit 2 was set
 3864   } else {
 3865     shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
 3866     shrptr(index, 1);
 3867   }
 3868 
 3869   // initialize remaining object fields: index is a multiple of 2 now
 3870   {
 3871     Label loop;
 3872     bind(loop);
 3873     movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
 3874     decrement(index);
 3875     jcc(Assembler::notZero, loop);
 3876   }
 3877 
 3878   bind(done);
 3879 }
 3880 
 3881 void MacroAssembler::get_inline_type_field_klass(Register holder_klass, Register index, Register inline_klass) {
 3882   inline_layout_info(holder_klass, index, inline_klass);
 3883   movptr(inline_klass, Address(inline_klass, InlineLayoutInfo::klass_offset()));
 3884 }
 3885 
 3886 void MacroAssembler::inline_layout_info(Register holder_klass, Register index, Register layout_info) {
 3887   movptr(layout_info, Address(holder_klass, InstanceKlass::inline_layout_info_array_offset()));
 3888 #ifdef ASSERT
 3889   {
 3890     Label done;
 3891     cmpptr(layout_info, 0);
 3892     jcc(Assembler::notEqual, done);
 3893     stop("inline_layout_info_array is null");
 3894     bind(done);
 3895   }
 3896 #endif
 3897 
 3898   InlineLayoutInfo array[2];
 3899   int size = (char*)&array[1] - (char*)&array[0]; // computing size of array elements
 3900   if (is_power_of_2(size)) {
 3901     shll(index, log2i_exact(size)); // Scale index by power of 2
 3902   } else {
 3903     imull(index, index, size); // Scale the index to be the entry index * array_element_size
 3904   }
 3905   lea(layout_info, Address(layout_info, index, Address::times_1, Array<InlineLayoutInfo>::base_offset_in_bytes()));
 3906 }
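// Worked example for the index scaling above: if sizeof(InlineLayoutInfo)
// were 16, is_power_of_2 holds and the index is scaled with a single
// shll(index, 4); for a non-power-of-two size a three-operand imull computes
// index * size instead. Either way the final lea yields the address
//   layout_info_array + base_offset + index * element_size,
// i.e. a pointer to entry 'index', not the entry's contents.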
 3907 
 3908 // Look up the method for a megamorphic invokeinterface call.
 3909 // The target method is determined by <intf_klass, itable_index>.
 3910 // The receiver klass is in recv_klass.
 3911 // On success, the result will be in method_result, and execution falls through.
 3912 // On failure, execution transfers to the given label.
 3913 void MacroAssembler::lookup_interface_method(Register recv_klass,
 3914                                              Register intf_klass,
 3915                                              RegisterOrConstant itable_index,
 3916                                              Register method_result,
 3917                                              Register scan_temp,
 3918                                              Label& L_no_such_interface,
 3919                                              bool return_method) {
 3920   assert_different_registers(recv_klass, intf_klass, scan_temp);
 3921   assert_different_registers(method_result, intf_klass, scan_temp);
 3922   assert(recv_klass != method_result || !return_method,
 3923          "recv_klass can be destroyed when method isn't needed");
 3924 
 3925   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
 3926          "caller must use same register for non-constant itable index as for method");
 3927 

 4938   } else {
 4939     Label L;
 4940     jccb(negate_condition(cc), L);
 4941     movl(dst, src);
 4942     bind(L);
 4943   }
 4944 }
 4945 
 4946 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
 4947   if (VM_Version::supports_cmov()) {
 4948     cmovl(cc, dst, src);
 4949   } else {
 4950     Label L;
 4951     jccb(negate_condition(cc), L);
 4952     movl(dst, src);
 4953     bind(L);
 4954   }
 4955 }
 4956 
 4957 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
 4958   if (!VerifyOops || VerifyAdapterSharing) {
 4959     // Below address of the code string confuses VerifyAdapterSharing
 4960     // because it may differ between otherwise equivalent adapters.
 4961     return;
 4962   }
 4963 
 4964   BLOCK_COMMENT("verify_oop {");
 4965   push(rscratch1);
 4966   push(rax);                          // save rax
 4967   push(reg);                          // pass register argument
 4968 
 4969   // Pass register number to verify_oop_subroutine
 4970   const char* b = nullptr;
 4971   {
 4972     ResourceMark rm;
 4973     stringStream ss;
 4974     ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
 4975     b = code_string(ss.as_string());
 4976   }
 4977   AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
 4978   pushptr(buffer.addr(), rscratch1);
 4979 
 4980   // call indirectly to solve generation ordering problem
 4981   movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
 4982   call(rax);

 5001   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
 5002   int stackElementSize = Interpreter::stackElementSize;
 5003   int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
 5004 #ifdef ASSERT
 5005   int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
 5006   assert(offset1 - offset == stackElementSize, "correct arithmetic");
 5007 #endif
 5008   Register             scale_reg    = noreg;
 5009   Address::ScaleFactor scale_factor = Address::no_scale;
 5010   if (arg_slot.is_constant()) {
 5011     offset += arg_slot.as_constant() * stackElementSize;
 5012   } else {
 5013     scale_reg    = arg_slot.as_register();
 5014     scale_factor = Address::times(stackElementSize);
 5015   }
 5016   offset += wordSize;           // return PC is on stack
 5017   return Address(rsp, scale_reg, scale_factor, offset);
 5018 }
 5019 
 5020 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
 5021   if (!VerifyOops || VerifyAdapterSharing) {
 5022     // Below address of the code string confuses VerifyAdapterSharing
 5023     // because it may differ between otherwise equivalent adapters.
 5024     return;
 5025   }
 5026 
 5027   push(rscratch1);
 5028   push(rax); // save rax,
 5029   // addr may contain rsp so we will have to adjust it based on the push
 5030   // we just did (and on 64 bit we do two pushes)
 5031   // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which
 5032   // stores rax into addr which is backwards of what was intended.
 5033   if (addr.uses(rsp)) {
 5034     lea(rax, addr);
 5035     pushptr(Address(rax, 2 * BytesPerWord));
 5036   } else {
 5037     pushptr(addr);
 5038   }
 5039 
 5040   // Pass register number to verify_oop_subroutine
 5041   const char* b = nullptr;
 5042   {
 5043     ResourceMark rm;
 5044     stringStream ss;
 5045     ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);

 5399 
 5400 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
 5401   // get mirror
 5402   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 5403   load_method_holder(mirror, method);
 5404   movptr(mirror, Address(mirror, mirror_offset));
 5405   resolve_oop_handle(mirror, tmp);
 5406 }
 5407 
 5408 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
 5409   load_method_holder(rresult, rmethod);
 5410   movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
 5411 }
 5412 
 5413 void MacroAssembler::load_method_holder(Register holder, Register method) {
 5414   movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
 5415   movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
 5416   movptr(holder, Address(holder, ConstantPool::pool_holder_offset()));          // InstanceKlass*
 5417 }
 5418 
 5419 void MacroAssembler::load_metadata(Register dst, Register src) {
 5420   if (UseCompactObjectHeaders) {
 5421     load_narrow_klass_compact(dst, src);
 5422   } else if (UseCompressedClassPointers) {
 5423     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5424   } else {
 5425     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5426   }
 5427 }
 5428 
 5429 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
 5430   assert(UseCompactObjectHeaders, "expect compact object headers");
 5431   movq(dst, Address(src, oopDesc::mark_offset_in_bytes()));
 5432   shrq(dst, markWord::klass_shift);
 5433 }
 5434 
 5435 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
 5436   assert_different_registers(src, tmp);
 5437   assert_different_registers(dst, tmp);
 5438 
 5439   if (UseCompactObjectHeaders) {
 5440     load_narrow_klass_compact(dst, src);
 5441     decode_klass_not_null(dst, tmp);
 5442   } else if (UseCompressedClassPointers) {
 5443     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5444     decode_klass_not_null(dst, tmp);
 5445   } else {
 5446     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5447   }
 5448 }
 5449 
 5450 void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) {
 5451   load_klass(dst, src, tmp);
 5452   movptr(dst, Address(dst, Klass::prototype_header_offset()));
 5453 }
 5454 
 5455 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
 5456   assert(!UseCompactObjectHeaders, "not with compact headers");
 5457   assert_different_registers(src, tmp);
 5458   assert_different_registers(dst, tmp);
 5459   if (UseCompressedClassPointers) {
 5460     encode_klass_not_null(src, tmp);
 5461     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5462   } else {
 5463     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5464   }
 5465 }
 5466 
 5467 void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
 5468   if (UseCompactObjectHeaders) {
 5469     assert(tmp != noreg, "need tmp");
 5470     assert_different_registers(klass, obj, tmp);
 5471     load_narrow_klass_compact(tmp, obj);
 5472     cmpl(klass, tmp);
 5473   } else if (UseCompressedClassPointers) {
 5474     cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes()));

 5500   bool as_raw = (decorators & AS_RAW) != 0;
 5501   if (as_raw) {
 5502     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1);
 5503   } else {
 5504     bs->load_at(this, decorators, type, dst, src, tmp1);
 5505   }
 5506 }
 5507 
 5508 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
 5509                                      Register tmp1, Register tmp2, Register tmp3) {
 5510   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5511   decorators = AccessInternal::decorator_fixup(decorators, type);
 5512   bool as_raw = (decorators & AS_RAW) != 0;
 5513   if (as_raw) {
 5514     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5515   } else {
 5516     bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5517   }
 5518 }
 5519 
 5520 void MacroAssembler::flat_field_copy(DecoratorSet decorators, Register src, Register dst,
 5521                                      Register inline_layout_info) {
 5522   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5523   bs->flat_field_copy(this, decorators, src, dst, inline_layout_info);
 5524 }
 5525 
 5526 void MacroAssembler::payload_offset(Register inline_klass, Register offset) {
 5527   movptr(offset, Address(inline_klass, InlineKlass::adr_members_offset()));
 5528   movl(offset, Address(offset, InlineKlass::payload_offset_offset()));
 5529 }
 5530 
 5531 void MacroAssembler::payload_addr(Register oop, Register data, Register inline_klass) {
 5532   // ((address) (void*) o) + vk->payload_offset();
 5533   Register offset = (data == oop) ? rscratch1 : data;
 5534   payload_offset(inline_klass, offset);
 5535   if (data == oop) {
 5536     addptr(data, offset);
 5537   } else {
 5538     lea(data, Address(oop, offset));
 5539   }
 5540 }
 5541 
 5542 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass,
 5543                                                 Register index, Register data) {
 5544   assert(index != rcx, "index is shifted by rcx");
 5545   assert_different_registers(array, array_klass, index);
 5546   assert_different_registers(rcx, array, index);
 5547 
 5548   // array->base() + (index << Klass::layout_helper_log2_element_size(lh));
 5549   movl(rcx, Address(array_klass, Klass::layout_helper_offset()));
 5550 
 5551   // Klass::layout_helper_log2_element_size(lh)
 5552   // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
 5553   shrl(rcx, Klass::_lh_log2_element_size_shift);
 5554   andl(rcx, Klass::_lh_log2_element_size_mask);
 5555   shlptr(index); // index << rcx
 5556 
 5557   lea(data, Address(array, index, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT)));
 5558 }
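      // Worked example (sketch): for a flat array with 8-byte elements the
      // layout helper yields log2_element_size == 3, so the lea above computes
      //   data = array + base_offset_in_bytes(T_FLAT_ELEMENT) + (index << 3)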
 5559 
 5560 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
 5561   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1);
 5562 }
 5563 
 5564 // Doesn't do verification, generates fixed-size code
 5565 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
 5566   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1);
 5567 }
 5568 
 5569 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
 5570                                     Register tmp2, Register tmp3, DecoratorSet decorators) {
 5571   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
 5572 }
 5573 
 5574 // Used for storing nulls.
 5575 void MacroAssembler::store_heap_oop_null(Address dst) {
 5576   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
 5577 }
 5578 
 5579 void MacroAssembler::store_klass_gap(Register dst, Register src) {

 5896   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5897   int klass_index = oop_recorder()->find_index(k);
 5898   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
 5899   Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 5900 }
 5901 
 5902 void MacroAssembler::reinit_heapbase() {
 5903   if (UseCompressedOops) {
 5904     if (Universe::heap() != nullptr) {
 5905       if (CompressedOops::base() == nullptr) {
 5906         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
 5907       } else {
 5908         mov64(r12_heapbase, (int64_t)CompressedOops::base());
 5909       }
 5910     } else {
 5911       movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr()));
 5912     }
 5913   }
 5914 }
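      // The three cases above (sketch): a zero heap base is rematerialized with
      // a cheap xor, a known non-zero base is loaded as a 64-bit constant, and
      // if the heap does not exist yet (presumably early code generation) the
      // base is loaded indirectly from CompressedOops::base_addr() so the value
      // is picked up once the heap is initialized.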
 5915 
 5916 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
 5917   assert(InlineTypeReturnedAsFields, "inline types should be returned as fields");
 5918   // An inline type might be returned. If fields are in registers we
 5919   // need to allocate an inline type instance and initialize it with
 5920   // the value of the fields.
 5921   Label skip;
 5922   // We only need a new buffered inline type if one is not already returned
 5923   testptr(rax, 1);
 5924   jcc(Assembler::zero, skip);
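        // Tagging convention tested above (sketch): rax holds either
        //   rax = oop                            (already buffered, bit 0 clear)
        //   rax = (intptr_t)InlineKlass* | 0x01  (fields in registers, bit 0 set)
        // so bit 0 decides between skipping and allocating a buffer.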
 5925   int call_offset = -1;
 5926 
 5927 #ifdef _LP64
 5928   // The following code is similar to allocate_instance but has some slight differences,
 5929   // e.g. the object size is never zero and is sometimes a constant; storing the klass
 5930   // pointer after allocation is unnecessary if vk != nullptr, etc. allocate_instance is not aware of these.
 5931   Label slow_case;
 5932   // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
 5933   mov(rscratch1, rax); // save rax for slow_case since *_allocate may corrupt it if allocation fails
 5934   if (vk != nullptr) {
 5935     // Called from C1, where the return type is statically known.
 5936     movptr(rbx, (intptr_t)vk->get_InlineKlass());
 5937     jint lh = vk->layout_helper();
 5938     assert(lh != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
 5939     if (UseTLAB && !Klass::layout_helper_needs_slow_path(lh)) {
 5940       tlab_allocate(rax, noreg, lh, r13, r14, slow_case);
 5941     } else {
 5942       jmp(slow_case);
 5943     }
 5944   } else {
 5945     // Called from the interpreter. RAX contains ((the InlineKlass* of the return type) | 0x01)
 5946     mov(rbx, rax);
 5947     andptr(rbx, -2);
 5948     if (UseTLAB) {
 5949       movl(r14, Address(rbx, Klass::layout_helper_offset()));
 5950       testl(r14, Klass::_lh_instance_slow_path_bit);
 5951       jcc(Assembler::notZero, slow_case);
 5952       tlab_allocate(rax, r14, 0, r13, r14, slow_case);
 5953     } else {
 5954       jmp(slow_case);
 5955     }
 5956   }
 5957   if (UseTLAB) {
 5958     // 2. Initialize buffered inline instance header
 5959     Register buffer_obj = rax;
 5960     Register klass = rbx;
 5961     if (UseCompactObjectHeaders) {
 5962       Register mark_word = r13;
 5963       movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
 5964       movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), mark_word);
 5965     } else {
 5966       movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), (intptr_t)markWord::inline_type_prototype().value());
 5967       xorl(r13, r13);
 5968       store_klass_gap(buffer_obj, r13);
 5969       if (vk == nullptr) {
 5970         // store_klass corrupts rbx (klass), so save it in r13 for later use (interpreter case only).
 5971         mov(r13, klass);
 5972       }
 5973       store_klass(buffer_obj, klass, rscratch1);
 5974       klass = r13;
 5975     }
 5976     // 3. Initialize its fields with an inline class specific handler
 5977     if (vk != nullptr) {
 5978       call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
 5979     } else {
 5980       movptr(rbx, Address(klass, InlineKlass::adr_members_offset()));
 5981       movptr(rbx, Address(rbx, InlineKlass::pack_handler_offset()));
 5982       call(rbx);
 5983     }
 5984     jmp(skip);
 5985   }
 5986   bind(slow_case);
 5987   // We failed to allocate a new inline type, fall back to a runtime
 5988   // call. Some oop fields may be live in registers but we can't
 5989   // tell. The runtime call will take care of preserving them
 5990   // across a GC, if one occurs.
 5991   mov(rax, rscratch1);
 5992 #endif
 5993 
 5994   if (from_interpreter) {
 5995     super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
 5996   } else {
 5997     call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
 5998     call_offset = offset();
 5999   }
 6000 
 6001   bind(skip);
 6002   return call_offset;
 6003 }
 6004 
 6005 // Move a value between registers/stack slots and update the reg_state
 6006 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
 6007   assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
 6008   if (reg_state[to->value()] == reg_written) {
 6009     return true; // Already written
 6010   }
 6011   if (from != to && bt != T_VOID) {
 6012     if (reg_state[to->value()] == reg_readonly) {
 6013       return false; // Not yet writable
 6014     }
 6015     if (from->is_reg()) {
 6016       if (to->is_reg()) {
 6017         if (from->is_XMMRegister()) {
 6018           if (bt == T_DOUBLE) {
 6019             movdbl(to->as_XMMRegister(), from->as_XMMRegister());
 6020           } else {
 6021             assert(bt == T_FLOAT, "must be float");
 6022             movflt(to->as_XMMRegister(), from->as_XMMRegister());
 6023           }
 6024         } else {
 6025           movq(to->as_Register(), from->as_Register());
 6026         }
 6027       } else {
 6028         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6029         Address to_addr = Address(rsp, st_off);
 6030         if (from->is_XMMRegister()) {
 6031           if (bt == T_DOUBLE) {
 6032             movdbl(to_addr, from->as_XMMRegister());
 6033           } else {
 6034             assert(bt == T_FLOAT, "must be float");
 6035             movflt(to_addr, from->as_XMMRegister());
 6036           }
 6037         } else {
 6038           movq(to_addr, from->as_Register());
 6039         }
 6040       }
 6041     } else {
 6042       Address from_addr = Address(rsp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize);
 6043       if (to->is_reg()) {
 6044         if (to->is_XMMRegister()) {
 6045           if (bt == T_DOUBLE) {
 6046             movdbl(to->as_XMMRegister(), from_addr);
 6047           } else {
 6048             assert(bt == T_FLOAT, "must be float");
 6049             movflt(to->as_XMMRegister(), from_addr);
 6050           }
 6051         } else {
 6052           movq(to->as_Register(), from_addr);
 6053         }
 6054       } else {
 6055         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6056         movq(r13, from_addr);
 6057         movq(Address(rsp, st_off), r13);
 6058       }
 6059     }
 6060   }
 6061   // Update register states
 6062   reg_state[from->value()] = reg_writable;
 6063   reg_state[to->value()] = reg_written;
 6064   return true;
 6065 }
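      // Summary of the reg_state transitions performed above (sketch):
      //   reg_state[to] == reg_written  -> nothing to do, return true
      //   reg_state[to] == reg_readonly -> destination still holds an unread
      //                                    source value, return false (retry later)
      //   reg_state[to] == reg_writable -> move (a no-op if from == to or
      //                                    bt == T_VOID), mark 'from' writable
      //                                    and 'to' written, return true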
 6066 
 6067 // Calculate the extra stack space required for packing or unpacking inline
 6068 // args and adjust the stack pointer.
 6069 //
 6070 // This extra stack space takes into account copy #2 of the return address,
 6071 // but NOT the saved RBP or the normal size of the frame (see MacroAssembler::remove_frame
 6072 // for notation).
 6073 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
 6074   // Two additional slots to account for the return address
 6075   int sp_inc = (args_on_stack + 2) * VMRegImpl::stack_slot_size;
 6076   sp_inc = align_up(sp_inc, StackAlignmentInBytes);
 6077   // Save the return address, adjust the stack (make sure it is properly
 6078   // 16-byte aligned) and copy the return address to the new top of the stack.
 6079   // The stack will be repaired on return (see MacroAssembler::remove_frame).
 6080   assert(sp_inc > 0, "sanity");
 6081   pop(r13);
 6082   subptr(rsp, sp_inc);
 6083 #ifdef ASSERT
 6084   movl(Address(rsp, -VMRegImpl::stack_slot_size), badRegWordVal);
 6085   movl(Address(rsp, -2 * VMRegImpl::stack_slot_size), badRegWordVal);
 6086   subptr(rsp, 2 * VMRegImpl::stack_slot_size);
 6087 #else
 6088   push(r13);
 6089 #endif
 6090   return sp_inc;
 6091 }
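      // Worked example (sketch, assuming VMRegImpl::stack_slot_size == 4 and
      // StackAlignmentInBytes == 16): for args_on_stack == 5 the raw increment
      // is (5 + 2) * 4 = 28 bytes, aligned up to sp_inc == 32. The two extra
      // slots cover the 8-byte return address that is re-pushed on top.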
 6092 
 6093 // Read all fields from an inline type buffer and store the field values in registers/stack slots.
 6094 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
 6095                                           VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
 6096                                           RegState reg_state[]) {
 6097   assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
 6098   assert(from->is_valid(), "source must be valid");
 6099   bool progress = false;
 6100 #ifdef ASSERT
 6101   const int start_offset = offset();
 6102 #endif
 6103 
 6104   Label L_null, L_notNull;
 6105   // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
 6106   Register tmp1 = r10;
 6107   Register tmp2 = r13;
 6108   Register fromReg = noreg;
 6109   ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, true);
 6110   bool done = true;
 6111   bool mark_done = true;
 6112   VMReg toReg;
 6113   BasicType bt;
 6114   // Check if argument requires a null check
 6115   bool null_check = false;
 6116   VMReg nullCheckReg;
 6117   while (stream.next(nullCheckReg, bt)) {
 6118     if (sig->at(stream.sig_index())._offset == -1) {
 6119       null_check = true;
 6120       break;
 6121     }
 6122   }
 6123   stream.reset(sig_index, to_index);
 6124   while (stream.next(toReg, bt)) {
 6125     assert(toReg->is_valid(), "destination must be valid");
 6126     int idx = (int)toReg->value();
 6127     if (reg_state[idx] == reg_readonly) {
 6128       if (idx != from->value()) {
 6129         mark_done = false;
 6130       }
 6131       done = false;
 6132       continue;
 6133     } else if (reg_state[idx] == reg_written) {
 6134       continue;
 6135     }
 6136     assert(reg_state[idx] == reg_writable, "must be writable");
 6137     reg_state[idx] = reg_written;
 6138     progress = true;
 6139 
 6140     if (fromReg == noreg) {
 6141       if (from->is_reg()) {
 6142         fromReg = from->as_Register();
 6143       } else {
 6144         int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6145         movq(tmp1, Address(rsp, st_off));
 6146         fromReg = tmp1;
 6147       }
 6148       if (null_check) {
 6149         // Nullable inline type argument, emit null check
 6150         testptr(fromReg, fromReg);
 6151         jcc(Assembler::zero, L_null);
 6152       }
 6153     }
 6154     int off = sig->at(stream.sig_index())._offset;
 6155     if (off == -1) {
 6156       assert(null_check, "Missing null check");
 6157       if (toReg->is_stack()) {
 6158         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6159         movq(Address(rsp, st_off), 1);
 6160       } else {
 6161         movq(toReg->as_Register(), 1);
 6162       }
 6163       continue;
 6164     }
 6165     assert(off > 0, "offset in object should be positive");
 6166     Address fromAddr = Address(fromReg, off);
 6167     if (!toReg->is_XMMRegister()) {
 6168       Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
 6169       if (is_reference_type(bt)) {
 6170         load_heap_oop(dst, fromAddr);
 6171       } else {
 6172         bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
 6173         load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
 6174       }
 6175       if (toReg->is_stack()) {
 6176         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6177         movq(Address(rsp, st_off), dst);
 6178       }
 6179     } else if (bt == T_DOUBLE) {
 6180       movdbl(toReg->as_XMMRegister(), fromAddr);
 6181     } else {
 6182       assert(bt == T_FLOAT, "must be float");
 6183       movflt(toReg->as_XMMRegister(), fromAddr);
 6184     }
 6185   }
 6186   if (progress && null_check) {
 6187     if (done) {
 6188       jmp(L_notNull);
 6189       bind(L_null);
 6190       // Set null marker to zero to signal that the argument is null.
 6191       // Also set all oop fields to zero to make the GC happy.
 6192       stream.reset(sig_index, to_index);
 6193       while (stream.next(toReg, bt)) {
 6194         if (sig->at(stream.sig_index())._offset == -1 ||
 6195             bt == T_OBJECT || bt == T_ARRAY) {
 6196           if (toReg->is_stack()) {
 6197             int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6198             movq(Address(rsp, st_off), 0);
 6199           } else {
 6200             xorq(toReg->as_Register(), toReg->as_Register());
 6201           }
 6202         }
 6203       }
 6204       bind(L_notNull);
 6205     } else {
 6206       bind(L_null);
 6207     }
 6208   }
 6209 
 6210   sig_index = stream.sig_index();
 6211   to_index = stream.regs_index();
 6212 
 6213   if (mark_done && reg_state[from->value()] != reg_written) {
 6214     // This is okay because no one else will write to that slot
 6215     reg_state[from->value()] = reg_writable;
 6216   }
 6217   from_index--;
 6218   assert(progress || (start_offset == offset()), "should not emit code");
 6219   return done;
 6220 }
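      // Null-marker convention used above (sketch): a SigEntry with _offset == -1
      // denotes the null-marker slot of a nullable value argument. The marker is
      // set to 1 when the buffered value is non-null; on the L_null path it and
      // all oop fields are zeroed so the GC never sees a stale reference.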
 6221 
 6222 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
 6223                                         VMRegPair* from, int from_count, int& from_index, VMReg to,
 6224                                         RegState reg_state[], Register val_array) {
 6225   assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter");
 6226   assert(to->is_valid(), "destination must be valid");
 6227 
 6228   if (reg_state[to->value()] == reg_written) {
 6229     skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
 6230     return true; // Already written
 6231   }
 6232 
 6233   // TODO 8284443 Isn't it an issue if below code uses r14 as tmp when it contains a spilled value?
 6234   // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
 6235   Register val_obj_tmp = r11;
 6236   Register from_reg_tmp = r14;
 6237   Register tmp1 = r10;
 6238   Register tmp2 = r13;
 6239   Register tmp3 = rbx;
 6240   Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();
 6241 
 6242   assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);
 6243 
 6244   if (reg_state[to->value()] == reg_readonly) {
 6245     if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
 6246       skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
 6247       return false; // Not yet writable
 6248     }
 6249     val_obj = val_obj_tmp;
 6250   }
 6251 
 6252   int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT);
 6253   load_heap_oop(val_obj, Address(val_array, index));
 6254 
 6255   ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
 6256   VMReg fromReg;
 6257   BasicType bt;
 6258   Label L_null;
 6259   while (stream.next(fromReg, bt)) {
 6260     assert(fromReg->is_valid(), "source must be valid");
 6261     reg_state[fromReg->value()] = reg_writable;
 6262 
 6263     int off = sig->at(stream.sig_index())._offset;
 6264     if (off == -1) {
 6265       // Nullable inline type argument, emit null check
 6266       Label L_notNull;
 6267       if (fromReg->is_stack()) {
 6268         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6269         testb(Address(rsp, ld_off), 1);
 6270       } else {
 6271         testb(fromReg->as_Register(), 1);
 6272       }
 6273       jcc(Assembler::notZero, L_notNull);
 6274       movptr(val_obj, 0);
 6275       jmp(L_null);
 6276       bind(L_notNull);
 6277       continue;
 6278     }
 6279 
 6280     assert(off > 0, "offset in object should be positive");
 6281     size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
 6282 
 6283     // Pack the scalarized field into the value object.
 6284     Address dst(val_obj, off);
 6285     if (!fromReg->is_XMMRegister()) {
 6286       Register src;
 6287       if (fromReg->is_stack()) {
 6288         src = from_reg_tmp;
 6289         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6290         load_sized_value(src, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
 6291       } else {
 6292         src = fromReg->as_Register();
 6293       }
 6294       assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
 6295       if (is_reference_type(bt)) {
 6296         // store_heap_oop transitively calls oop_store_at, which corrupts dst.base(), so copy val_obj into tmp3 first to keep it valid.
 6297         mov(tmp3, val_obj);
 6298         Address dst_with_tmp3(tmp3, off);
 6299         store_heap_oop(dst_with_tmp3, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
 6300       } else {
 6301         store_sized_value(dst, src, size_in_bytes);
 6302       }
 6303     } else if (bt == T_DOUBLE) {
 6304       movdbl(dst, fromReg->as_XMMRegister());
 6305     } else {
 6306       assert(bt == T_FLOAT, "must be float");
 6307       movflt(dst, fromReg->as_XMMRegister());
 6308     }
 6309   }
 6310   bind(L_null);
 6311   sig_index = stream.sig_index();
 6312   from_index = stream.regs_index();
 6313 
 6314   assert(reg_state[to->value()] == reg_writable, "must have already been read");
 6315   bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
 6316   assert(success, "to register must be writable");
 6317   return true;
 6318 }
 6319 
 6320 VMReg MacroAssembler::spill_reg_for(VMReg reg) {
 6321   return reg->is_XMMRegister() ? xmm8->as_VMReg() : r14->as_VMReg();
 6322 }
 6323 
 6324 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
 6325   assert((initial_framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
 6326   if (needs_stack_repair) {
 6327     // The method has a scalarized entry point (where fields of value object arguments
 6328     // are passed through registers and stack), and a non-scalarized entry point (where
 6329     // value object arguments are given as oops). The non-scalarized entry point will
 6330     // first load each field of value object arguments and store them in registers and on
 6331     // the stack in a way compatible with the scalarized entry point. To do so, some extra
 6332     // stack space might be reserved (if argument registers are not enough). On leaving the
 6333     // method, this space must be freed.
 6334     //
 6335     // In case we used the non-scalarized entry point the stack looks like this:
 6336     //
 6337     // | Arguments from caller     |
 6338     // |---------------------------|  <-- caller's SP
 6339     // | Return address #1         |
 6340     // |---------------------------|
 6341     // | Extension space for       |
 6342     // |   inline arg (un)packing  |
 6343     // |---------------------------|
 6344     // | Return address #2         |
 6345     // | Saved RBP                 |
 6346     // |---------------------------|  <-- start of this method's frame
 6347     // | sp_inc                    |
 6348     // | method locals             |
 6349     // |---------------------------|  <-- SP
 6350     //
 6351     // There are two copies of the return address on the stack. They are identical at
 6352     // first, but that can change.
 6353     // If the caller has been deoptimized, copy #1 will be patched to point at the
 6354     // deopt blob, while copy #2 will still point into the old method. In short,
 6355     // copy #2 is not reliable and should not be used. It mostly serves to
 6356     // add space between the extension space and the locals, just as there would be
 6357     // between the real arguments and the locals if no unpacking were needed (i.e. when
 6358     // entering through the scalarized entry point).
 6359     //
 6360     // When leaving, one must use copy #1 of the return address, keeping in mind
 6361     // that from the scalarized entry point there is only one copy. Indeed, when the
 6362     // scalarized calling convention was used, the stack looks like this:
 6363     //
 6364     // | Arguments from caller     |
 6365     // |---------------------------|  <-- caller's SP
 6366     // | Return address            |
 6367     // | Saved RBP                 |
 6368     // |---------------------------|  <-- start of this method's frame
 6369     // | sp_inc                    |
 6370     // | method locals             |
 6371     // |---------------------------|  <-- SP
 6372     //
 6373     // The sp_inc stack slot holds the total size of the frame, including the extension
 6374     // space, the possible copy #2 of the return address, and the saved RBP (but never
 6375     // copy #1 of the return address). That is how copy #1 of the return address is found.
 6376     // This size is expressed in bytes. Be careful when using it from C++ in pointer arithmetic;
 6377     // you might need to divide it by wordSize.
 6378     //
 6379     // One can find sp_inc since the start of the method's frame is SP + initial_framesize.
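          //
          // A sketch of the matching C++-side pointer arithmetic (hypothetical
          // helper, mind the byte/word caveat above), with sp the current SP:
          //   address fp       = (address)sp + initial_framesize;  // start of frame
          //   intptr_t sp_inc  = *(intptr_t*)(fp - wordSize);      // in bytes!
          //   address* ret_pc1 = (address*)((address)sp + sp_inc); // copy #1 slot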
 6380 
 6381     movq(rbp, Address(rsp, initial_framesize));
 6382     // The stack increment resides just below the saved rbp
 6383     addq(rsp, Address(rsp, initial_framesize - wordSize));
 6384   } else {
 6385     if (initial_framesize > 0) {
 6386       addq(rsp, initial_framesize);
 6387     }
 6388     pop(rbp);
 6389   }
 6390 }
 6391 
 6392 #if COMPILER2_OR_JVMCI
 6393 
 6394 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers
 6395 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, KRegister mask) {
 6396   // cnt - number of qwords (8-byte words).
 6397   // base - start address, qword aligned.
 6398   Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
 6399   bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
 6400   if (use64byteVector) {
 6401     evpbroadcastq(xtmp, val, AVX_512bit);
 6402   } else if (MaxVectorSize >= 32) {
 6403     movdq(xtmp, val);
 6404     punpcklqdq(xtmp, xtmp);
 6405     vinserti128_high(xtmp, xtmp);
 6406   } else {
 6407     movdq(xtmp, val);
 6408     punpcklqdq(xtmp, xtmp);
 6409   }
 6410   jmp(L_zero_64_bytes);
 6411 
 6412   BIND(L_loop);
 6413   if (MaxVectorSize >= 32) {
 6414     fill64(base, 0, xtmp, use64byteVector);
 6415   } else {
 6416     movdqu(Address(base,  0), xtmp);
 6417     movdqu(Address(base, 16), xtmp);
 6418     movdqu(Address(base, 32), xtmp);
 6419     movdqu(Address(base, 48), xtmp);
 6420   }
 6421   addptr(base, 64);
 6422 
 6423   BIND(L_zero_64_bytes);
 6424   subptr(cnt, 8);
 6425   jccb(Assembler::greaterEqual, L_loop);
 6426 
 6427   // Fill trailing 64 bytes
 6428   if (use64byteVector) {
 6429     addptr(cnt, 8);
 6430     jccb(Assembler::equal, L_end);
 6431     fill64_masked(3, base, 0, xtmp, mask, cnt, val, true);
 6432     jmp(L_end);
 6433   } else {
 6434     addptr(cnt, 4);
 6435     jccb(Assembler::less, L_tail);
 6436     if (MaxVectorSize >= 32) {
 6437       vmovdqu(Address(base, 0), xtmp);
 6438     } else {
 6439       movdqu(Address(base,  0), xtmp);
 6440       movdqu(Address(base, 16), xtmp);
 6441     }
 6442   }
 6443   addptr(base, 32);
 6444   subptr(cnt, 4);
 6445 
 6446   BIND(L_tail);
 6447   addptr(cnt, 4);
 6448   jccb(Assembler::lessEqual, L_end);
 6449   if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
 6450     fill32_masked(3, base, 0, xtmp, mask, cnt, val);
 6451   } else {
 6452     decrement(cnt);
 6453 
 6454     BIND(L_sloop);
 6455     movq(Address(base, 0), xtmp);
 6456     addptr(base, 8);
 6457     decrement(cnt);
 6458     jccb(Assembler::greaterEqual, L_sloop);
 6459   }
 6460   BIND(L_end);
 6461 }
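      // Net effect of the routine above (sketch): the scalar equivalent of
      //   for (size_t i = 0; i < cnt; i++) ((julong*)base)[i] = val;
      // implemented with 64/32/16-byte vector stores for the bulk and masked
      // or 8-byte stores for the tail.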
 6462 
 6463 // Clearing constant-sized memory using YMM/ZMM registers.
 6464 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 6465   assert(UseAVX > 2 && VM_Version::supports_avx512vl(), "");
 6466   bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
 6467 
 6468   int vector64_count = (cnt & (~0x7)) >> 3;
 6469   cnt = cnt & 0x7;
 6470   const int fill64_per_loop = 4;

 6532         break;
 6533       case 7:
 6534         if (use64byteVector) {
 6535           movl(rtmp, 0x7F);
 6536           kmovwl(mask, rtmp);
 6537           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 6538         } else {
 6539           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 6540           movl(rtmp, 0x7);
 6541           kmovwl(mask, rtmp);
 6542           evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
 6543         }
 6544         break;
 6545       default:
 6546         fatal("Unexpected length: %d\n", cnt);
 6547         break;
 6548     }
 6549   }
 6550 }
 6551 
 6552 void MacroAssembler::clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp,
 6553                                bool is_large, bool word_copy_only, KRegister mask) {
 6554   // cnt      - number of qwords (8-byte words).
 6555   // base     - start address, qword aligned.
 6556   // is_large - if optimizers know cnt is larger than InitArrayShortSize
 6557   assert(base==rdi, "base register must be rdi for rep stos");
 6558   assert(val==rax,   "val register must be rax for rep stos");
 6559   assert(cnt==rcx,   "cnt register must be rcx for rep stos");
 6560   assert(InitArrayShortSize % BytesPerLong == 0,
 6561     "InitArrayShortSize should be a multiple of BytesPerLong");
 6562 
 6563   Label DONE;



 6564 
 6565   if (!is_large) {
 6566     Label LOOP, LONG;
 6567     cmpptr(cnt, InitArrayShortSize/BytesPerLong);
 6568     jccb(Assembler::greater, LONG);
 6569 
 6570     decrement(cnt);
 6571     jccb(Assembler::negative, DONE); // Zero length
 6572 
 6573     // Use individual pointer-sized stores for small counts:
 6574     BIND(LOOP);
 6575     movptr(Address(base, cnt, Address::times_ptr), val);
 6576     decrement(cnt);
 6577     jccb(Assembler::greaterEqual, LOOP);
 6578     jmpb(DONE);
 6579 
 6580     BIND(LONG);
 6581   }
 6582 
 6583   // Use longer rep-prefixed ops for non-small counts:
 6584   if (UseFastStosb && !word_copy_only) {
 6585     shlptr(cnt, 3); // convert to number of bytes
 6586     rep_stosb();
 6587   } else if (UseXMMForObjInit) {
 6588     xmm_clear_mem(base, cnt, val, xtmp, mask);
 6589   } else {
 6590     rep_stos();
 6591   }
 6592 
 6593   BIND(DONE);
 6594 }
 6595 
 6596 #endif //COMPILER2_OR_JVMCI
 6597 
 6598 
 6599 void MacroAssembler::generate_fill(BasicType t, bool aligned,
 6600                                    Register to, Register value, Register count,
 6601                                    Register rtmp, XMMRegister xtmp) {
 6602   ShortBranchVerifier sbv(this);
 6603   assert_different_registers(to, value, count, rtmp);
 6604   Label L_exit;
 6605   Label L_fill_2_bytes, L_fill_4_bytes;
 6606 
 6607 #if defined(COMPILER2)
 6607   if (MaxVectorSize >= 32 &&

10488 
10489   // Load top.
10490   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10491 
10492   // Check if the lock-stack is full.
10493   cmpl(top, LockStack::end_offset());
10494   jcc(Assembler::greaterEqual, slow);
10495 
10496   // Check for recursion.
10497   cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
10498   jcc(Assembler::equal, push);
10499 
10500   // Check header for monitor (0b10).
10501   testptr(reg_rax, markWord::monitor_value);
10502   jcc(Assembler::notZero, slow);
10503 
10504   // Try to lock. Transition lock bits 0b01 => 0b00
10505   movptr(tmp, reg_rax);
10506   andptr(tmp, ~(int32_t)markWord::unlocked_value);
10507   orptr(reg_rax, markWord::unlocked_value);
10508   // Mask inline_type bit such that we go to the slow path if the object is an inline type
10509   andptr(reg_rax, ~((int) markWord::inline_type_bit_in_place));
10510 
10511   lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
10512   jcc(Assembler::notEqual, slow);
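        // Lock-bit states involved above (sketch): 0b01 = unlocked,
        // 0b00 = fast-locked, 0b10 = inflated monitor. The CAS only succeeds
        // if the mark word was still unlocked and the object is not an inline
        // type, installing the 0b00 (locked) pattern.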
10513 
10514   // Restore top, CAS clobbers register.
10515   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10516 
10517   bind(push);
10518   // After successful lock, push object on lock-stack.
10519   movptr(Address(thread, top), obj);
10520   incrementl(top, oopSize);
10521   movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
10522 }
10523 
10524 // Implements fast-unlocking.
10525 //
10526 // obj: the object to be unlocked
10527 // reg_rax: rax
10528 // thread: the thread
10529 // tmp: a temporary register
10530 void MacroAssembler::fast_unlock(Register obj, Register reg_rax, Register tmp, Label& slow) {