src/hotspot/cpu/x86/macroAssembler_x86.cpp

   11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   12  * version 2 for more details (a copy is included in the LICENSE file that
   13  * accompanied this code).
   14  *
   15  * You should have received a copy of the GNU General Public License version
   16  * 2 along with this work; if not, write to the Free Software Foundation,
   17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   18  *
   19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   20  * or visit www.oracle.com if you need additional information or have any
   21  * questions.
   22  *
   23  */
   24 
   25 #include "asm/assembler.hpp"
   26 #include "asm/assembler.inline.hpp"
   27 #include "code/aotCodeCache.hpp"
   28 #include "code/compiledIC.hpp"
   29 #include "compiler/compiler_globals.hpp"
   30 #include "compiler/disassembler.hpp"

   31 #include "crc32c.h"
   32 #include "gc/shared/barrierSet.hpp"
   33 #include "gc/shared/barrierSetAssembler.hpp"
   34 #include "gc/shared/collectedHeap.inline.hpp"
   35 #include "gc/shared/tlab_globals.hpp"
   36 #include "interpreter/bytecodeHistogram.hpp"
   37 #include "interpreter/interpreter.hpp"
   38 #include "interpreter/interpreterRuntime.hpp"
   39 #include "jvm.h"
   40 #include "memory/resourceArea.hpp"
   41 #include "memory/universe.hpp"
   42 #include "oops/accessDecorators.hpp"
   43 #include "oops/compressedKlass.inline.hpp"
   44 #include "oops/compressedOops.inline.hpp"
   45 #include "oops/klass.inline.hpp"

   46 #include "prims/methodHandles.hpp"

   47 #include "runtime/continuation.hpp"
   48 #include "runtime/interfaceSupport.inline.hpp"
   49 #include "runtime/javaThread.hpp"
   50 #include "runtime/jniHandles.hpp"
   51 #include "runtime/objectMonitor.hpp"
   52 #include "runtime/os.hpp"
   53 #include "runtime/safepoint.hpp"
   54 #include "runtime/safepointMechanism.hpp"
   55 #include "runtime/sharedRuntime.hpp"

   56 #include "runtime/stubRoutines.hpp"
   57 #include "utilities/checkedCast.hpp"
   58 #include "utilities/macros.hpp"
   59 
   60 #ifdef PRODUCT
   61 #define BLOCK_COMMENT(str) /* nothing */
   62 #define STOP(error) stop(error)
   63 #else
   64 #define BLOCK_COMMENT(str) block_comment(str)
   65 #define STOP(error) block_comment(error); stop(error)
   66 #endif
   67 
   68 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
   69 
   70 #ifdef ASSERT
   71 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
   72 #endif
   73 
   74 static const Assembler::Condition reverse[] = {
   75     Assembler::noOverflow     /* overflow      = 0x0 */ ,
   76     Assembler::overflow       /* noOverflow    = 0x1 */ ,
   77     Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
   78     Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,

 1286 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1287   assert_different_registers(arg_0, c_rarg1, c_rarg2);
 1288   assert_different_registers(arg_1, c_rarg2);
 1289   pass_arg2(this, arg_2);
 1290   pass_arg1(this, arg_1);
 1291   pass_arg0(this, arg_0);
 1292   call_VM_leaf(entry_point, 3);
 1293 }
 1294 
 1295 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
 1296   assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
 1297   assert_different_registers(arg_1, c_rarg2, c_rarg3);
 1298   assert_different_registers(arg_2, c_rarg3);
 1299   pass_arg3(this, arg_3);
 1300   pass_arg2(this, arg_2);
 1301   pass_arg1(this, arg_1);
 1302   pass_arg0(this, arg_0);
 1303   call_VM_leaf(entry_point, 3);
 1304 }
 1305 
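The ordering here is load-bearing: arguments are marshalled last-to-first so that filling c_rarg2 cannot clobber a source that a later pass_arg still needs, which is exactly what the assert_different_registers calls enforce. A stand-alone model of that hazard, with hypothetical helper names (not HotSpot code):

    #include <cassert>

    // Moves are emitted last-to-first (arg_2 -> c_rarg2, arg_1 -> c_rarg1,
    // arg_0 -> c_rarg0), so a source that is read later must not live in a
    // destination that an earlier-emitted move already overwrote.
    enum Reg { c_rarg0, c_rarg1, c_rarg2, other };

    static bool shuffle_is_safe(Reg arg_0, Reg arg_1, Reg arg_2) {
      (void)arg_2;  // arg_2 is moved first, so it may alias any slot
      return arg_0 != c_rarg1 && arg_0 != c_rarg2 &&  // mirrors the first assert
             arg_1 != c_rarg2;                        // mirrors the second assert
    }

    int main() {
      assert(shuffle_is_safe(other, other, c_rarg2));   // ok: arg_2 in its own slot
      assert(!shuffle_is_safe(c_rarg2, other, other));  // arg_0 clobbered before read
      return 0;
    }
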
 1306 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
 1307   pass_arg0(this, arg_0);
 1308   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1309 }
 1310 
 1311 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
 1312   assert_different_registers(arg_0, c_rarg1);
 1313   pass_arg1(this, arg_1);
 1314   pass_arg0(this, arg_0);
 1315   MacroAssembler::call_VM_leaf_base(entry_point, 2);
 1316 }
 1317 
 1318 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1319   assert_different_registers(arg_0, c_rarg1, c_rarg2);
 1320   assert_different_registers(arg_1, c_rarg2);
 1321   pass_arg2(this, arg_2);
 1322   pass_arg1(this, arg_1);
 1323   pass_arg0(this, arg_0);
 1324   MacroAssembler::call_VM_leaf_base(entry_point, 3);
 1325 }

 2339     lea(rscratch, src);
 2340     Assembler::mulss(dst, Address(rscratch, 0));
 2341   }
 2342 }
 2343 
 2344 void MacroAssembler::null_check(Register reg, int offset) {
 2345   if (needs_explicit_null_check(offset)) {
 2346     // provoke OS null exception if reg is null by
 2347     // accessing M[reg] w/o changing any (non-CC) registers
 2348     // NOTE: cmpl is plenty here to provoke a segv
 2349     cmpptr(rax, Address(reg, 0));
 2350     // Note: should probably use testl(rax, Address(reg, 0));
 2351     //       may be shorter code (however, this version of
 2352     //       testl needs to be implemented first)
 2353   } else {
 2354     // nothing to do, (later) access of M[reg + offset]
 2355     // will provoke OS null exception if reg is null
 2356   }
 2357 }
 2358 
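The choice between an implicit and an explicit check comes down to whether a faulting access at reg + offset is still guaranteed to land in the protected page at address zero. A minimal sketch of that predicate, assuming it reduces to an offset-versus-page-size test (the real needs_explicit_null_check() also covers platform-specific details):

    #include <cstddef>

    // Sketch: an access at null + offset only traps reliably if it still
    // falls inside the unmapped page at address zero.
    static bool needs_explicit_null_check_sketch(ptrdiff_t offset, size_t page_size) {
      return offset < 0 || static_cast<size_t>(offset) >= page_size;
    }
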
 2359 void MacroAssembler::os_breakpoint() {
  2360   // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
 2361   // (e.g., MSVC can't call ps() otherwise)
 2362   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 2363 }
 2364 
 2365 void MacroAssembler::unimplemented(const char* what) {
 2366   const char* buf = nullptr;
 2367   {
 2368     ResourceMark rm;
 2369     stringStream ss;
 2370     ss.print("unimplemented: %s", what);
 2371     buf = code_string(ss.as_string());
 2372   }
 2373   stop(buf);
 2374 }
 2375 
 2376 #define XSTATE_BV 0x200
 2377 
 2378 void MacroAssembler::pop_CPU_state() {

 3421 }
 3422 
 3423 // C++ bool manipulation
 3424 void MacroAssembler::testbool(Register dst) {
 3425   if(sizeof(bool) == 1)
 3426     testb(dst, 0xff);
 3427   else if(sizeof(bool) == 2) {
 3428     // testw implementation needed for two byte bools
 3429     ShouldNotReachHere();
 3430   } else if(sizeof(bool) == 4)
 3431     testl(dst, dst);
 3432   else
 3433     // unsupported
 3434     ShouldNotReachHere();
 3435 }
 3436 
 3437 void MacroAssembler::testptr(Register dst, Register src) {
 3438   testq(dst, src);
 3439 }
 3440 
 3441 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
 3442 void MacroAssembler::tlab_allocate(Register obj,
 3443                                    Register var_size_in_bytes,
 3444                                    int con_size_in_bytes,
 3445                                    Register t1,
 3446                                    Register t2,
 3447                                    Label& slow_case) {
 3448   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 3449   bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
 3450 }
 3451 
 3452 RegSet MacroAssembler::call_clobbered_gp_registers() {
 3453   RegSet regs;
 3454   regs += RegSet::of(rax, rcx, rdx);
 3455 #ifndef _WINDOWS
 3456   regs += RegSet::of(rsi, rdi);
 3457 #endif
 3458   regs += RegSet::range(r8, r11);
 3459   if (UseAPX) {
 3460     regs += RegSet::range(r16, as_Register(Register::number_of_registers - 1));

 3624   xorptr(temp, temp);    // use _zero reg to clear memory (shorter code)
 3625   if (UseIncDec) {
 3626     shrptr(index, 3);  // divide by 8/16 and set carry flag if bit 2 was set
 3627   } else {
 3628     shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
 3629     shrptr(index, 1);
 3630   }
 3631 
 3632   // initialize remaining object fields: index is a multiple of 2 now
 3633   {
 3634     Label loop;
 3635     bind(loop);
 3636     movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
 3637     decrement(index);
 3638     jcc(Assembler::notZero, loop);
 3639   }
 3640 
 3641   bind(done);
 3642 }
 3643 
 3644 // Look up the method for a megamorphic invokeinterface call.
 3645 // The target method is determined by <intf_klass, itable_index>.
 3646 // The receiver klass is in recv_klass.
 3647 // On success, the result will be in method_result, and execution falls through.
 3648 // On failure, execution transfers to the given label.
 3649 void MacroAssembler::lookup_interface_method(Register recv_klass,
 3650                                              Register intf_klass,
 3651                                              RegisterOrConstant itable_index,
 3652                                              Register method_result,
 3653                                              Register scan_temp,
 3654                                              Label& L_no_such_interface,
 3655                                              bool return_method) {
 3656   assert_different_registers(recv_klass, intf_klass, scan_temp);
 3657   assert_different_registers(method_result, intf_klass, scan_temp);
 3658   assert(recv_klass != method_result || !return_method,
 3659          "recv_klass can be destroyed when method isn't needed");
 3660 
 3661   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
 3662          "caller must use same register for non-constant itable index as for method");
 3663 

 4674   } else {
 4675     Label L;
 4676     jccb(negate_condition(cc), L);
 4677     movl(dst, src);
 4678     bind(L);
 4679   }
 4680 }
 4681 
 4682 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
 4683   if (VM_Version::supports_cmov()) {
 4684     cmovl(cc, dst, src);
 4685   } else {
 4686     Label L;
 4687     jccb(negate_condition(cc), L);
 4688     movl(dst, src);
 4689     bind(L);
 4690   }
 4691 }
 4692 
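For CPUs without CMOV, the fallback emits a short branch around a plain move; the two forms are semantically identical. A C-level sketch of that equivalence:

    // With CMOV support the move is a single predicated instruction; without
    // it, the same effect is a short branch over a plain move, exactly as the
    // fallback above emits.
    static int cmov32_sketch(bool cc, int dst, int src) {
      if (cc) {
        dst = src;  // cmovl(cc, dst, src)
      }
      return dst;
    }
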
 4693 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
 4694   if (!VerifyOops) return;
 4695 
 4696   BLOCK_COMMENT("verify_oop {");
 4697   push(rscratch1);
 4698   push(rax);                          // save rax
 4699   push(reg);                          // pass register argument
 4700 
 4701   // Pass register number to verify_oop_subroutine
 4702   const char* b = nullptr;
 4703   {
 4704     ResourceMark rm;
 4705     stringStream ss;
 4706     ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
 4707     b = code_string(ss.as_string());
 4708   }
 4709   AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
 4710   pushptr(buffer.addr(), rscratch1);
 4711 
 4712   // call indirectly to solve generation ordering problem
 4713   movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
 4714   call(rax);

 4930   // or something else. Since this is a slow path, we can optimize for code density,
 4931   // and just restart the search from the beginning.
 4932   jmpb(L_restart);
 4933 
 4934   // Counter updates:
 4935 
 4936   // Increment polymorphic counter instead of receiver slot.
 4937   bind(L_polymorphic);
 4938   movptr(offset, poly_count_offset);
 4939   jmpb(L_count_update);
 4940 
 4941   // Found a receiver, convert its slot offset to corresponding count offset.
 4942   bind(L_found_recv);
 4943   addptr(offset, receiver_to_count_step);
 4944 
 4945   bind(L_count_update);
 4946   addptr(Address(mdp, offset, Address::times_ptr), DataLayout::counter_increment);
 4947 }
 4948 
 4949 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
 4950   if (!VerifyOops) return;
 4951 
 4952   push(rscratch1);
 4953   push(rax); // save rax,
 4954   // addr may contain rsp so we will have to adjust it based on the push
 4955   // we just did (and on 64 bit we do two pushes)
 4956   // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which
 4957   // stores rax into addr which is backwards of what was intended.
 4958   if (addr.uses(rsp)) {
 4959     lea(rax, addr);
 4960     pushptr(Address(rax, 2 * BytesPerWord));
 4961   } else {
 4962     pushptr(addr);
 4963   }
 4964 
 4965   // Pass register number to verify_oop_subroutine
 4966   const char* b = nullptr;
 4967   {
 4968     ResourceMark rm;
 4969     stringStream ss;
 4970     ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);

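The 2 * BytesPerWord bias exists because two pushes have already moved rsp by the time the rsp-relative address is dereferenced. A sketch of the arithmetic, with a hypothetical helper:

    #include <cstdint>

    // After pushing rscratch1 and rax, rsp is 2*BytesPerWord lower than when
    // 'addr' was formed, so an rsp-relative slot must be read 2*BytesPerWord
    // higher to hit the intended location. BytesPerWord is 8 on x86_64.
    static uint64_t* rebias_after_pushes(uint64_t* stale_rsp_relative_addr) {
      const int BytesPerWord = 8;
      return stale_rsp_relative_addr + (2 * BytesPerWord) / sizeof(uint64_t);
    }
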
 5324 
 5325 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
 5326   // get mirror
 5327   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 5328   load_method_holder(mirror, method);
 5329   movptr(mirror, Address(mirror, mirror_offset));
 5330   resolve_oop_handle(mirror, tmp);
 5331 }
 5332 
 5333 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
 5334   load_method_holder(rresult, rmethod);
 5335   movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
 5336 }
 5337 
 5338 void MacroAssembler::load_method_holder(Register holder, Register method) {
 5339   movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
 5340   movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
 5341   movptr(holder, Address(holder, ConstantPool::pool_holder_offset()));          // InstanceKlass*
 5342 }
 5343 
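load_method_holder is a three-step pointer chase through the metadata graph. A hypothetical C++ analogue, with illustrative stand-in types for the real accessors:

    // Field names here are illustrative stand-ins, not the real HotSpot API.
    struct InstanceKlass;
    struct ConstantPool { InstanceKlass* pool_holder; };
    struct ConstMethod  { ConstantPool*  constants; };
    struct Method       { ConstMethod*   const_method; };

    static InstanceKlass* method_holder_sketch(const Method* m) {
      // Method* -> ConstMethod* -> ConstantPool* -> InstanceKlass*
      return m->const_method->constants->pool_holder;
    }
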
 5344 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
 5345   assert(UseCompactObjectHeaders, "expect compact object headers");
 5346   movq(dst, Address(src, oopDesc::mark_offset_in_bytes()));
 5347   shrq(dst, markWord::klass_shift);
 5348 }
 5349 
 5350 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
 5351   assert_different_registers(src, tmp);
 5352   assert_different_registers(dst, tmp);
 5353 
 5354   if (UseCompactObjectHeaders) {
 5355     load_narrow_klass_compact(dst, src);
 5356     decode_klass_not_null(dst, tmp);
 5357   } else if (UseCompressedClassPointers) {
 5358     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5359     decode_klass_not_null(dst, tmp);
 5360   } else {
 5361     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5362   }
 5363 }
 5364 
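With compact object headers there is no separate klass field; the narrow klass id is carved out of the mark word itself. A sketch of the decode, assuming the id occupies the bits at and above markWord::klass_shift (the exact layout is defined by markWord):

    #include <cstdint>

    // movq loads the full 64-bit mark word; shrq drops everything below the
    // klass bits, leaving the narrow klass id.
    static uint32_t narrow_klass_from_mark(uint64_t mark_word, unsigned klass_shift) {
      return static_cast<uint32_t>(mark_word >> klass_shift);
    }
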
 5365 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
 5366   assert(!UseCompactObjectHeaders, "not with compact headers");
 5367   assert_different_registers(src, tmp);
 5368   assert_different_registers(dst, tmp);
 5369   if (UseCompressedClassPointers) {
 5370     encode_klass_not_null(src, tmp);
 5371     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5372   } else {
 5373     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5374   }
 5375 }
 5376 
 5377 void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
 5378   if (UseCompactObjectHeaders) {
 5379     assert(tmp != noreg, "need tmp");
 5380     assert_different_registers(klass, obj, tmp);
 5381     load_narrow_klass_compact(tmp, obj);
 5382     cmpl(klass, tmp);
 5383   } else if (UseCompressedClassPointers) {
 5384     cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes()));

 5410   bool as_raw = (decorators & AS_RAW) != 0;
 5411   if (as_raw) {
 5412     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1);
 5413   } else {
 5414     bs->load_at(this, decorators, type, dst, src, tmp1);
 5415   }
 5416 }
 5417 
 5418 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
 5419                                      Register tmp1, Register tmp2, Register tmp3) {
 5420   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5421   decorators = AccessInternal::decorator_fixup(decorators, type);
 5422   bool as_raw = (decorators & AS_RAW) != 0;
 5423   if (as_raw) {
 5424     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5425   } else {
 5426     bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5427   }
 5428 }
 5429 
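The AS_RAW path works by making a qualified (statically bound) call into the base BarrierSetAssembler, skipping whatever GC-specific override is installed. A sketch of the same C++ idiom with hypothetical types:

    // Qualified calls bind statically and bypass virtual dispatch.
    struct BarrierAsm { virtual void store_at() { /* plain store, no barriers */ } };
    struct GCBarrierAsm : BarrierAsm { void store_at() override { /* + GC barriers */ } };

    static void store_demo(BarrierAsm* bs, bool as_raw) {
      if (as_raw) {
        bs->BarrierAsm::store_at();  // static binding: raw access
      } else {
        bs->store_at();              // virtual dispatch: GC-aware access
      }
    }
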
 5430 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
 5431   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1);
 5432 }
 5433 
 5434 // Doesn't do verification, generates fixed size code
 5435 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
 5436   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1);
 5437 }
 5438 
 5439 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
 5440                                     Register tmp2, Register tmp3, DecoratorSet decorators) {
 5441   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
 5442 }
 5443 
 5444 // Used for storing nulls.
 5445 void MacroAssembler::store_heap_oop_null(Address dst) {
 5446   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
 5447 }
 5448 
 5449 void MacroAssembler::store_klass_gap(Register dst, Register src) {

 5766   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5767   int klass_index = oop_recorder()->find_index(k);
 5768   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
 5769   Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 5770 }
 5771 
 5772 void MacroAssembler::reinit_heapbase() {
 5773   if (UseCompressedOops) {
 5774     if (Universe::heap() != nullptr) {
 5775       if (CompressedOops::base() == nullptr) {
 5776         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
 5777       } else {
 5778         mov64(r12_heapbase, (int64_t)CompressedOops::base());
 5779       }
 5780     } else {
 5781       movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr()));
 5782     }
 5783   }
 5784 }
 5785 
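r12_heapbase only matters because decoding a compressed oop with a non-null base needs that base in a register; with a null base the add vanishes and zeroing the register suffices. A sketch of the decode, assuming the usual base-plus-shift scheme:

    #include <cstdint>

    // A compressed oop decodes as base + (narrow << shift); the decode code
    // keeps the base resident in r12_heapbase.
    static uint64_t decode_compressed_oop(uint32_t narrow_oop, uint64_t heap_base,
                                          unsigned shift) {
      return heap_base + (static_cast<uint64_t>(narrow_oop) << shift);
    }
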
 5786 #if COMPILER2_OR_JVMCI
 5787 
 5788 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers
 5789 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 5790   // cnt - number of qwords (8-byte words).
 5791   // base - start address, qword aligned.
 5792   Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
 5793   bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
 5794   if (use64byteVector) {
 5795     vpxor(xtmp, xtmp, xtmp, AVX_512bit);
 5796   } else if (MaxVectorSize >= 32) {
 5797     vpxor(xtmp, xtmp, xtmp, AVX_256bit);
 5798   } else {
 5799     pxor(xtmp, xtmp);
 5800   }
 5801   jmp(L_zero_64_bytes);
 5802 
 5803   BIND(L_loop);
 5804   if (MaxVectorSize >= 32) {
 5805     fill64(base, 0, xtmp, use64byteVector);
 5806   } else {
 5807     movdqu(Address(base,  0), xtmp);
 5808     movdqu(Address(base, 16), xtmp);
 5809     movdqu(Address(base, 32), xtmp);
 5810     movdqu(Address(base, 48), xtmp);
 5811   }
 5812   addptr(base, 64);
 5813 
 5814   BIND(L_zero_64_bytes);
 5815   subptr(cnt, 8);
 5816   jccb(Assembler::greaterEqual, L_loop);
 5817 
 5818   // Copy trailing 64 bytes
 5819   if (use64byteVector) {
 5820     addptr(cnt, 8);
 5821     jccb(Assembler::equal, L_end);
 5822     fill64_masked(3, base, 0, xtmp, mask, cnt, rtmp, true);
 5823     jmp(L_end);
 5824   } else {
 5825     addptr(cnt, 4);
 5826     jccb(Assembler::less, L_tail);
 5827     if (MaxVectorSize >= 32) {
 5828       vmovdqu(Address(base, 0), xtmp);
 5829     } else {
 5830       movdqu(Address(base,  0), xtmp);
 5831       movdqu(Address(base, 16), xtmp);
 5832     }
 5833   }
 5834   addptr(base, 32);
 5835   subptr(cnt, 4);
 5836 
 5837   BIND(L_tail);
 5838   addptr(cnt, 4);
 5839   jccb(Assembler::lessEqual, L_end);
 5840   if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
 5841     fill32_masked(3, base, 0, xtmp, mask, cnt, rtmp);
 5842   } else {
 5843     decrement(cnt);
 5844 
 5845     BIND(L_sloop);
 5846     movq(Address(base, 0), xtmp);
 5847     addptr(base, 8);
 5848     decrement(cnt);
 5849     jccb(Assembler::greaterEqual, L_sloop);
 5850   }
 5851   BIND(L_end);
 5852 }
 5853 
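Stripped of the vector details, the routine clears 64 bytes (8 qwords) per loop iteration and then mops up a sub-8-qword tail, via a masked store on AVX-512 or a scalar loop otherwise. A scalar model of that control flow (sketch):

    #include <cstdint>

    // The vector code replaces the chunk body with 16/32/64-byte stores and,
    // on AVX-512, the tail with a masked store.
    static void clear_qwords_sketch(uint64_t* base, long cnt) {
      while (cnt >= 8) {
        for (int i = 0; i < 8; i++) *base++ = 0;
        cnt -= 8;
      }
      while (cnt-- > 0) *base++ = 0;
    }
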
 5854 // Clearing constant sized memory using YMM/ZMM registers.
 5855 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 5856   assert(UseAVX > 2 && VM_Version::supports_avx512vl(), "");
 5857   bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
 5858 
 5859   int vector64_count = (cnt & (~0x7)) >> 3;
 5860   cnt = cnt & 0x7;
 5861   const int fill64_per_loop = 4;

 5923         break;
 5924       case 7:
 5925         if (use64byteVector) {
 5926           movl(rtmp, 0x7F);
 5927           kmovwl(mask, rtmp);
 5928           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 5929         } else {
 5930           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 5931           movl(rtmp, 0x7);
 5932           kmovwl(mask, rtmp);
 5933           evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
 5934         }
 5935         break;
 5936       default:
 5937         fatal("Unexpected length : %d\n",cnt);
 5938         break;
 5939     }
 5940   }
 5941 }
 5942 
 5943 void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp, XMMRegister xtmp,
 5944                                bool is_large, KRegister mask) {
 5945   // cnt      - number of qwords (8-byte words).
 5946   // base     - start address, qword aligned.
 5947   // is_large - if optimizers know cnt is larger than InitArrayShortSize
  5948   assert(base==rdi, "base register must be rdi for rep stos");
  5949   assert(tmp==rax,   "tmp register must be rax for rep stos");
  5950   assert(cnt==rcx,   "cnt register must be rcx for rep stos");
 5951   assert(InitArrayShortSize % BytesPerLong == 0,
 5952     "InitArrayShortSize should be the multiple of BytesPerLong");
 5953 
 5954   Label DONE;
 5955   if (!is_large || !UseXMMForObjInit) {
 5956     xorptr(tmp, tmp);
 5957   }
 5958 
 5959   if (!is_large) {
 5960     Label LOOP, LONG;
 5961     cmpptr(cnt, InitArrayShortSize/BytesPerLong);
 5962     jccb(Assembler::greater, LONG);
 5963 
 5964     decrement(cnt);
 5965     jccb(Assembler::negative, DONE); // Zero length
 5966 
 5967     // Use individual pointer-sized stores for small counts:
 5968     BIND(LOOP);
 5969     movptr(Address(base, cnt, Address::times_ptr), tmp);
 5970     decrement(cnt);
 5971     jccb(Assembler::greaterEqual, LOOP);
 5972     jmpb(DONE);
 5973 
 5974     BIND(LONG);
 5975   }
 5976 
 5977   // Use longer rep-prefixed ops for non-small counts:
 5978   if (UseFastStosb) {
 5979     shlptr(cnt, 3); // convert to number of bytes
 5980     rep_stosb();
 5981   } else if (UseXMMForObjInit) {
 5982     xmm_clear_mem(base, cnt, tmp, xtmp, mask);
 5983   } else {
 5984     rep_stos();
 5985   }
 5986 
 5987   BIND(DONE);
 5988 }
 5989 
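The three strategies are interchangeable ways to zero cnt qwords: rep stosb wants a byte count (hence the shlptr(cnt, 3)), the XMM path is the routine above, and rep stos zeroes qword by qword. A C-level sketch of the two rep-based strategies:

    #include <cstdint>
    #include <cstring>

    static void clear_mem_sketch(uint64_t* base, size_t cnt_qwords, bool fast_stosb) {
      if (fast_stosb) {
        memset(base, 0, cnt_qwords << 3);  // byte count, like rep stosb
      } else {
        for (size_t i = 0; i < cnt_qwords; i++) {
          base[i] = 0;                     // like rep stosq
        }
      }
    }
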
 5990 #endif //COMPILER2_OR_JVMCI
 5991 
 5992 
 5993 void MacroAssembler::generate_fill(BasicType t, bool aligned,
 5994                                    Register to, Register value, Register count,
 5995                                    Register rtmp, XMMRegister xtmp) {
 5996   ShortBranchVerifier sbv(this);
 5997   assert_different_registers(to, value, count, rtmp);
 5998   Label L_exit;
 5999   Label L_fill_2_bytes, L_fill_4_bytes;
 6000 
 6001 #if defined(COMPILER2)
 6002   if(MaxVectorSize >=32 &&

 9868 
 9869   // Load top.
 9870   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
 9871 
 9872   // Check if the lock-stack is full.
 9873   cmpl(top, LockStack::end_offset());
 9874   jcc(Assembler::greaterEqual, slow);
 9875 
 9876   // Check for recursion.
 9877   cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
 9878   jcc(Assembler::equal, push);
 9879 
 9880   // Check header for monitor (0b10).
 9881   testptr(reg_rax, markWord::monitor_value);
 9882   jcc(Assembler::notZero, slow);
 9883 
 9884   // Try to lock. Transition lock bits 0b01 => 0b00
 9885   movptr(tmp, reg_rax);
 9886   andptr(tmp, ~(int32_t)markWord::unlocked_value);
 9887   orptr(reg_rax, markWord::unlocked_value);
 9888   lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
 9889   jcc(Assembler::notEqual, slow);
 9890 
 9891   // Restore top, CAS clobbers register.
 9892   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
 9893 
 9894   bind(push);
 9895   // After successful lock, push object on lock-stack.
 9896   movptr(Address(thread, top), obj);
 9897   incrementl(top, oopSize);
 9898   movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
 9899 }
 9900 
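The CAS attempts the mark-word transition from unlocked (0b01) to fast-locked (0b00): the expected value is forced to have the unlocked bit set, the desired value clears it. A sketch using std::atomic in place of lock(); cmpxchgptr, assuming the standard bit encoding (0b01 = unlocked, 0b00 = fast-locked, 0b10 = monitor):

    #include <atomic>
    #include <cstdint>

    static bool try_fast_lock_sketch(std::atomic<uint64_t>& mark_word) {
      uint64_t expected = mark_word.load() | 0x1;     // orptr(reg_rax, unlocked_value)
      uint64_t desired  = expected & ~uint64_t(0x1);  // andptr(tmp, ~unlocked_value)
      return mark_word.compare_exchange_strong(expected, desired);  // lock(); cmpxchgptr
    }
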
 9901 // Implements fast-unlocking.
 9902 //
 9903 // obj: the object to be unlocked
 9904 // reg_rax: rax
 9905 // thread: the thread
 9906 // tmp: a temporary register
 9907 void MacroAssembler::fast_unlock(Register obj, Register reg_rax, Register tmp, Label& slow) {

src/hotspot/cpu/x86/macroAssembler_x86.cpp (new version)

   11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   12  * version 2 for more details (a copy is included in the LICENSE file that
   13  * accompanied this code).
   14  *
   15  * You should have received a copy of the GNU General Public License version
   16  * 2 along with this work; if not, write to the Free Software Foundation,
   17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   18  *
   19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   20  * or visit www.oracle.com if you need additional information or have any
   21  * questions.
   22  *
   23  */
   24 
   25 #include "asm/assembler.hpp"
   26 #include "asm/assembler.inline.hpp"
   27 #include "code/aotCodeCache.hpp"
   28 #include "code/compiledIC.hpp"
   29 #include "compiler/compiler_globals.hpp"
   30 #include "compiler/disassembler.hpp"
   31 #include "ci/ciInlineKlass.hpp"
   32 #include "crc32c.h"
   33 #include "gc/shared/barrierSet.hpp"
   34 #include "gc/shared/barrierSetAssembler.hpp"
   35 #include "gc/shared/collectedHeap.inline.hpp"
   36 #include "gc/shared/tlab_globals.hpp"
   37 #include "interpreter/bytecodeHistogram.hpp"
   38 #include "interpreter/interpreter.hpp"
   39 #include "interpreter/interpreterRuntime.hpp"
   40 #include "jvm.h"
   41 #include "memory/resourceArea.hpp"
   42 #include "memory/universe.hpp"
   43 #include "oops/accessDecorators.hpp"
   44 #include "oops/compressedKlass.inline.hpp"
   45 #include "oops/compressedOops.inline.hpp"
   46 #include "oops/klass.inline.hpp"
   47 #include "oops/resolvedFieldEntry.hpp"
   48 #include "prims/methodHandles.hpp"
   49 #include "runtime/arguments.hpp"
   50 #include "runtime/continuation.hpp"
   51 #include "runtime/interfaceSupport.inline.hpp"
   52 #include "runtime/javaThread.hpp"
   53 #include "runtime/jniHandles.hpp"
   54 #include "runtime/objectMonitor.hpp"
   55 #include "runtime/os.hpp"
   56 #include "runtime/safepoint.hpp"
   57 #include "runtime/safepointMechanism.hpp"
   58 #include "runtime/sharedRuntime.hpp"
   59 #include "runtime/signature_cc.hpp"
   60 #include "runtime/stubRoutines.hpp"
   61 #include "utilities/checkedCast.hpp"
   62 #include "utilities/macros.hpp"
   63 #include "vmreg_x86.inline.hpp"
   64 #ifdef COMPILER2
   65 #include "opto/output.hpp"
   66 #endif
   67 
   68 #ifdef PRODUCT
   69 #define BLOCK_COMMENT(str) /* nothing */
   70 #define STOP(error) stop(error)
   71 #else
   72 #define BLOCK_COMMENT(str) block_comment(str)
   73 #define STOP(error) block_comment(error); stop(error)
   74 #endif
   75 
   76 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
   77 
   78 #ifdef ASSERT
   79 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
   80 #endif
   81 
   82 static const Assembler::Condition reverse[] = {
   83     Assembler::noOverflow     /* overflow      = 0x0 */ ,
   84     Assembler::overflow       /* noOverflow    = 0x1 */ ,
   85     Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
   86     Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,

 1294 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1295   assert_different_registers(arg_0, c_rarg1, c_rarg2);
 1296   assert_different_registers(arg_1, c_rarg2);
 1297   pass_arg2(this, arg_2);
 1298   pass_arg1(this, arg_1);
 1299   pass_arg0(this, arg_0);
 1300   call_VM_leaf(entry_point, 3);
 1301 }
 1302 
 1303 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
 1304   assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
 1305   assert_different_registers(arg_1, c_rarg2, c_rarg3);
 1306   assert_different_registers(arg_2, c_rarg3);
 1307   pass_arg3(this, arg_3);
 1308   pass_arg2(this, arg_2);
 1309   pass_arg1(this, arg_1);
 1310   pass_arg0(this, arg_0);
 1311   call_VM_leaf(entry_point, 3);
 1312 }
 1313 
 1314 void MacroAssembler::super_call_VM_leaf(address entry_point) {
 1315   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1316 }
 1317 
 1318 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
 1319   pass_arg0(this, arg_0);
 1320   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1321 }
 1322 
 1323 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
 1324   assert_different_registers(arg_0, c_rarg1);
 1325   pass_arg1(this, arg_1);
 1326   pass_arg0(this, arg_0);
 1327   MacroAssembler::call_VM_leaf_base(entry_point, 2);
 1328 }
 1329 
 1330 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1331   assert_different_registers(arg_0, c_rarg1, c_rarg2);
 1332   assert_different_registers(arg_1, c_rarg2);
 1333   pass_arg2(this, arg_2);
 1334   pass_arg1(this, arg_1);
 1335   pass_arg0(this, arg_0);
 1336   MacroAssembler::call_VM_leaf_base(entry_point, 3);
 1337 }

 2351     lea(rscratch, src);
 2352     Assembler::mulss(dst, Address(rscratch, 0));
 2353   }
 2354 }
 2355 
 2356 void MacroAssembler::null_check(Register reg, int offset) {
 2357   if (needs_explicit_null_check(offset)) {
 2358     // provoke OS null exception if reg is null by
 2359     // accessing M[reg] w/o changing any (non-CC) registers
 2360     // NOTE: cmpl is plenty here to provoke a segv
 2361     cmpptr(rax, Address(reg, 0));
 2362     // Note: should probably use testl(rax, Address(reg, 0));
 2363     //       may be shorter code (however, this version of
 2364     //       testl needs to be implemented first)
 2365   } else {
 2366     // nothing to do, (later) access of M[reg + offset]
 2367     // will provoke OS null exception if reg is null
 2368   }
 2369 }
 2370 
 2371 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
 2372   andptr(markword, markWord::inline_type_mask_in_place);
 2373   cmpptr(markword, markWord::inline_type_pattern);
 2374   jcc(Assembler::equal, is_inline_type);
 2375 }
 2376 
 2377 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type, bool can_be_null) {
 2378   if (can_be_null) {
 2379     testptr(object, object);
 2380     jcc(Assembler::zero, not_inline_type);
 2381   }
 2382   const int is_inline_type_mask = markWord::inline_type_pattern;
 2383   movptr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
 2384   andptr(tmp, is_inline_type_mask);
 2385   cmpptr(tmp, is_inline_type_mask);
 2386   jcc(Assembler::notEqual, not_inline_type);
 2387 }
 2388 
 2389 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) {
 2390   movl(temp_reg, flags);
 2391   testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
 2392   jcc(Assembler::notEqual, is_null_free_inline_type);
 2393 }
 2394 
 2395 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) {
 2396   movl(temp_reg, flags);
 2397   testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
 2398   jcc(Assembler::equal, not_null_free_inline_type);
 2399 }
 2400 
 2401 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) {
 2402   movl(temp_reg, flags);
 2403   testl(temp_reg, 1 << ResolvedFieldEntry::is_flat_shift);
 2404   jcc(Assembler::notEqual, is_flat);
 2405 }
 2406 
 2407 void MacroAssembler::test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker) {
 2408   movl(temp_reg, flags);
 2409   testl(temp_reg, 1 << ResolvedFieldEntry::has_null_marker_shift);
 2410   jcc(Assembler::notEqual, has_null_marker);
 2411 }
 2412 
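All of these helpers reduce to bit tests: the mark-word variants mask out the inline-type bits and compare against the inline-type pattern, while the field variants test a single ResolvedFieldEntry flag bit. A sketch of the pattern test, with the markWord constants left symbolic:

    #include <cstdint>

    // mask/pattern correspond to markWord::inline_type_mask_in_place and
    // markWord::inline_type_pattern, left as parameters here.
    static bool is_inline_type_mark(uint64_t mark, uint64_t mask, uint64_t pattern) {
      return (mark & mask) == pattern;
    }
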
 2413 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
 2414   Label test_mark_word;
 2415   // load mark word
 2416   movptr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
 2417   // check displaced
 2418   testl(temp_reg, markWord::unlocked_value);
 2419   jccb(Assembler::notZero, test_mark_word);
 2420   // slow path use klass prototype
 2421   push(rscratch1);
 2422   load_prototype_header(temp_reg, oop, rscratch1);
 2423   pop(rscratch1);
 2424 
 2425   bind(test_mark_word);
 2426   testl(temp_reg, test_bit);
 2427   jcc((jmp_set) ? Assembler::notZero : Assembler::zero, jmp_label);
 2428 }
 2429 
 2430 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg,
 2431                                          Label& is_flat_array) {
 2432 #ifdef _LP64
 2433   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array);
 2434 #else
 2435   load_klass(temp_reg, oop, noreg);
 2436   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 2437   test_flat_array_layout(temp_reg, is_flat_array);
 2438 #endif
 2439 }
 2440 
 2441 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg,
 2442                                              Label& is_non_flat_array) {
 2443 #ifdef _LP64
 2444   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array);
 2445 #else
 2446   load_klass(temp_reg, oop, noreg);
 2447   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 2448   test_non_flat_array_layout(temp_reg, is_non_flat_array);
 2449 #endif
 2450 }
 2451 
 2452 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label&is_null_free_array) {
 2453 #ifdef _LP64
 2454   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
 2455 #else
 2456   Unimplemented();
 2457 #endif
 2458 }
 2459 
 2460 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label&is_non_null_free_array) {
 2461 #ifdef _LP64
 2462   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
 2463 #else
 2464   Unimplemented();
 2465 #endif
 2466 }
 2467 
 2468 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) {
 2469   testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
 2470   jcc(Assembler::notZero, is_flat_array);
 2471 }
 2472 
 2473 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) {
 2474   testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
 2475   jcc(Assembler::zero, is_non_flat_array);
 2476 }
 2477 
 2478 void MacroAssembler::os_breakpoint() {
  2479   // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
 2480   // (e.g., MSVC can't call ps() otherwise)
 2481   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 2482 }
 2483 
 2484 void MacroAssembler::unimplemented(const char* what) {
 2485   const char* buf = nullptr;
 2486   {
 2487     ResourceMark rm;
 2488     stringStream ss;
 2489     ss.print("unimplemented: %s", what);
 2490     buf = code_string(ss.as_string());
 2491   }
 2492   stop(buf);
 2493 }
 2494 
 2495 #define XSTATE_BV 0x200
 2496 
 2497 void MacroAssembler::pop_CPU_state() {

 3540 }
 3541 
 3542 // C++ bool manipulation
 3543 void MacroAssembler::testbool(Register dst) {
 3544   if(sizeof(bool) == 1)
 3545     testb(dst, 0xff);
 3546   else if(sizeof(bool) == 2) {
 3547     // testw implementation needed for two byte bools
 3548     ShouldNotReachHere();
 3549   } else if(sizeof(bool) == 4)
 3550     testl(dst, dst);
 3551   else
 3552     // unsupported
 3553     ShouldNotReachHere();
 3554 }
 3555 
 3556 void MacroAssembler::testptr(Register dst, Register src) {
 3557   testq(dst, src);
 3558 }
 3559 
 3560 // Object / value buffer allocation...
 3561 //
 3562 // Kills klass and rsi on LP64
 3563 void MacroAssembler::allocate_instance(Register klass, Register new_obj,
 3564                                        Register t1, Register t2,
 3565                                        bool clear_fields, Label& alloc_failed)
 3566 {
 3567   Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop;
 3568   Register layout_size = t1;
 3569   assert(new_obj == rax, "needs to be rax");
 3570   assert_different_registers(klass, new_obj, t1, t2);
 3571 
 3572   // get instance_size in InstanceKlass (scaled to a count of bytes)
 3573   movl(layout_size, Address(klass, Klass::layout_helper_offset()));
 3574   // test to see if it is malformed in some way
 3575   testl(layout_size, Klass::_lh_instance_slow_path_bit);
 3576   jcc(Assembler::notZero, slow_case_no_pop);
 3577 
 3578   // Allocate the instance:
 3579   //  If TLAB is enabled:
 3580   //    Try to allocate in the TLAB.
 3581   //    If fails, go to the slow path.
 3582   //  Else If inline contiguous allocations are enabled:
 3583   //    Try to allocate in eden.
 3584   //    If fails due to heap end, go to slow path.
 3585   //
 3586   //  If TLAB is enabled OR inline contiguous is enabled:
 3587   //    Initialize the allocation.
 3588   //    Exit.
 3589   //
 3590   //  Go to slow path.
 3591 
 3592   push(klass);
 3593   if (UseTLAB) {
 3594     tlab_allocate(new_obj, layout_size, 0, klass, t2, slow_case);
 3595     if (ZeroTLAB || (!clear_fields)) {
 3596       // the fields have been already cleared
 3597       jmp(initialize_header);
 3598     } else {
 3599       // initialize both the header and fields
 3600       jmp(initialize_object);
 3601     }
 3602   } else {
 3603     jmp(slow_case);
 3604   }
 3605 
  3606   // If UseTLAB is true, the object is created above and still needs to be initialized.
 3607   // Otherwise, skip and go to the slow path.
 3608   if (UseTLAB) {
 3609     if (clear_fields) {
 3610       // The object is initialized before the header.  If the object size is
 3611       // zero, go directly to the header initialization.
 3612       bind(initialize_object);
 3613       if (UseCompactObjectHeaders) {
 3614         assert(is_aligned(oopDesc::base_offset_in_bytes(), BytesPerLong), "oop base offset must be 8-byte-aligned");
 3615         decrement(layout_size, oopDesc::base_offset_in_bytes());
 3616       } else {
 3617         decrement(layout_size, sizeof(oopDesc));
 3618       }
 3619       jcc(Assembler::zero, initialize_header);
 3620 
 3621       // Initialize topmost object field, divide size by 8, check if odd and
 3622       // test if zero.
 3623       Register zero = klass;
 3624       xorl(zero, zero);    // use zero reg to clear memory (shorter code)
 3625       shrl(layout_size, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
 3626 
 3627   #ifdef ASSERT
 3628       // make sure instance_size was multiple of 8
 3629       Label L;
 3630       // Ignore partial flag stall after shrl() since it is debug VM
 3631       jcc(Assembler::carryClear, L);
 3632       stop("object size is not multiple of 2 - adjust this code");
 3633       bind(L);
 3634       // must be > 0, no extra check needed here
 3635   #endif
 3636 
 3637       // initialize remaining object fields: instance_size was a multiple of 8
 3638       {
 3639         Label loop;
 3640         bind(loop);
 3641         int header_size_bytes = oopDesc::header_size() * HeapWordSize;
 3642         assert(is_aligned(header_size_bytes, BytesPerLong), "oop header size must be 8-byte-aligned");
 3643         movptr(Address(new_obj, layout_size, Address::times_8, header_size_bytes - 1*oopSize), zero);
 3644         decrement(layout_size);
 3645         jcc(Assembler::notZero, loop);
 3646       }
 3647     } // clear_fields
 3648 
 3649     // initialize object header only.
 3650     bind(initialize_header);
 3651     if (UseCompactObjectHeaders || Arguments::is_valhalla_enabled()) {
 3652       pop(klass);
 3653       Register mark_word = t2;
 3654       movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
 3655       movptr(Address(new_obj, oopDesc::mark_offset_in_bytes ()), mark_word);
 3656     } else {
 3657      movptr(Address(new_obj, oopDesc::mark_offset_in_bytes()),
 3658             (intptr_t)markWord::prototype().value()); // header
 3659      pop(klass);   // get saved klass back in the register.
 3660     }
 3661     if (!UseCompactObjectHeaders) {
 3662       xorl(rsi, rsi);                 // use zero reg to clear memory (shorter code)
 3663       store_klass_gap(new_obj, rsi);  // zero klass gap for compressed oops
 3664       movptr(t2, klass);         // preserve klass
 3665       store_klass(new_obj, t2, rscratch1);  // src klass reg is potentially compressed
 3666     }
 3667     jmp(done);
 3668   }
 3669 
 3670   bind(slow_case);
 3671   pop(klass);
 3672   bind(slow_case_no_pop);
 3673   jmp(alloc_failed);
 3674 
 3675   bind(done);
 3676 }
 3677 
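The field-clearing portion of allocate_instance converts the byte size into a count of 8-byte words past the header and zeroes them from the top of the object downward. A scalar model of that loop (sketch):

    #include <cstddef>
    #include <cstdint>

    // Mirrors the decrement(layout_size) loop: words are cleared from the
    // highest word down to the first word after the header.
    static void clear_fields_sketch(char* obj, size_t layout_size_bytes,
                                    size_t header_bytes) {
      size_t words = (layout_size_bytes - header_bytes) / 8;
      uint64_t* fields = reinterpret_cast<uint64_t*>(obj + header_bytes);
      for (size_t i = words; i > 0; i--) {
        fields[i - 1] = 0;
      }
    }
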
 3678 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
 3679 void MacroAssembler::tlab_allocate(Register obj,
 3680                                    Register var_size_in_bytes,
 3681                                    int con_size_in_bytes,
 3682                                    Register t1,
 3683                                    Register t2,
 3684                                    Label& slow_case) {
 3685   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 3686   bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
 3687 }
 3688 
 3689 RegSet MacroAssembler::call_clobbered_gp_registers() {
 3690   RegSet regs;
 3691   regs += RegSet::of(rax, rcx, rdx);
 3692 #ifndef _WINDOWS
 3693   regs += RegSet::of(rsi, rdi);
 3694 #endif
 3695   regs += RegSet::range(r8, r11);
 3696   if (UseAPX) {
 3697     regs += RegSet::range(r16, as_Register(Register::number_of_registers - 1));

 3861   xorptr(temp, temp);    // use _zero reg to clear memory (shorter code)
 3862   if (UseIncDec) {
 3863     shrptr(index, 3);  // divide by 8/16 and set carry flag if bit 2 was set
 3864   } else {
 3865     shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
 3866     shrptr(index, 1);
 3867   }
 3868 
 3869   // initialize remaining object fields: index is a multiple of 2 now
 3870   {
 3871     Label loop;
 3872     bind(loop);
 3873     movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
 3874     decrement(index);
 3875     jcc(Assembler::notZero, loop);
 3876   }
 3877 
 3878   bind(done);
 3879 }
 3880 
 3881 void MacroAssembler::inline_layout_info(Register holder_klass, Register index, Register layout_info) {
 3882   movptr(layout_info, Address(holder_klass, InstanceKlass::inline_layout_info_array_offset()));
 3883 #ifdef ASSERT
 3884   {
 3885     Label done;
 3886     cmpptr(layout_info, 0);
 3887     jcc(Assembler::notEqual, done);
 3888     stop("inline_layout_info_array is null");
 3889     bind(done);
 3890   }
 3891 #endif
 3892 
 3893   InlineLayoutInfo array[2];
 3894   int size = (char*)&array[1] - (char*)&array[0]; // computing size of array elements
 3895   if (is_power_of_2(size)) {
 3896     shll(index, log2i_exact(size)); // Scale index by power of 2
 3897   } else {
 3898     imull(index, index, size); // Scale the index to be the entry index * array_element_size
 3899   }
 3900   lea(layout_info, Address(layout_info, index, Address::times_1, Array<InlineLayoutInfo>::base_offset_in_bytes()));
 3901 }
 3902 
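The pointer-difference trick computes the element stride including padding, i.e. sizeof(InlineLayoutInfo), and the scaling picks a shift when that stride is a power of two. A sketch of the same choice:

    #include <cstddef>

    static size_t scale_index_sketch(size_t index, size_t elem_size) {
      if ((elem_size & (elem_size - 1)) == 0) {   // is_power_of_2(size)
        size_t log2 = 0;
        while ((size_t(1) << log2) < elem_size) log2++;
        return index << log2;                     // shll(index, log2i_exact(size))
      }
      return index * elem_size;                   // imull(index, index, size)
    }
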
 3903 // Look up the method for a megamorphic invokeinterface call.
 3904 // The target method is determined by <intf_klass, itable_index>.
 3905 // The receiver klass is in recv_klass.
 3906 // On success, the result will be in method_result, and execution falls through.
 3907 // On failure, execution transfers to the given label.
 3908 void MacroAssembler::lookup_interface_method(Register recv_klass,
 3909                                              Register intf_klass,
 3910                                              RegisterOrConstant itable_index,
 3911                                              Register method_result,
 3912                                              Register scan_temp,
 3913                                              Label& L_no_such_interface,
 3914                                              bool return_method) {
 3915   assert_different_registers(recv_klass, intf_klass, scan_temp);
 3916   assert_different_registers(method_result, intf_klass, scan_temp);
 3917   assert(recv_klass != method_result || !return_method,
 3918          "recv_klass can be destroyed when method isn't needed");
 3919 
 3920   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
 3921          "caller must use same register for non-constant itable index as for method");
 3922 

 4933   } else {
 4934     Label L;
 4935     jccb(negate_condition(cc), L);
 4936     movl(dst, src);
 4937     bind(L);
 4938   }
 4939 }
 4940 
 4941 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
 4942   if (VM_Version::supports_cmov()) {
 4943     cmovl(cc, dst, src);
 4944   } else {
 4945     Label L;
 4946     jccb(negate_condition(cc), L);
 4947     movl(dst, src);
 4948     bind(L);
 4949   }
 4950 }
 4951 
 4952 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
 4953   if (!VerifyOops || VerifyAdapterSharing) {
  4954     // The address of the code string below confuses VerifyAdapterSharing
  4955     // because it may differ between otherwise equivalent adapters.
 4956     return;
 4957   }
 4958 
 4959   BLOCK_COMMENT("verify_oop {");
 4960   push(rscratch1);
 4961   push(rax);                          // save rax
 4962   push(reg);                          // pass register argument
 4963 
 4964   // Pass register number to verify_oop_subroutine
 4965   const char* b = nullptr;
 4966   {
 4967     ResourceMark rm;
 4968     stringStream ss;
 4969     ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
 4970     b = code_string(ss.as_string());
 4971   }
 4972   AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
 4973   pushptr(buffer.addr(), rscratch1);
 4974 
 4975   // call indirectly to solve generation ordering problem
 4976   movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
 4977   call(rax);

 5193   // or something else. Since this is a slow path, we can optimize for code density,
 5194   // and just restart the search from the beginning.
 5195   jmpb(L_restart);
 5196 
 5197   // Counter updates:
 5198 
 5199   // Increment polymorphic counter instead of receiver slot.
 5200   bind(L_polymorphic);
 5201   movptr(offset, poly_count_offset);
 5202   jmpb(L_count_update);
 5203 
 5204   // Found a receiver, convert its slot offset to corresponding count offset.
 5205   bind(L_found_recv);
 5206   addptr(offset, receiver_to_count_step);
 5207 
 5208   bind(L_count_update);
 5209   addptr(Address(mdp, offset, Address::times_ptr), DataLayout::counter_increment);
 5210 }
 5211 
 5212 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
 5213   if (!VerifyOops || VerifyAdapterSharing) {
  5214     // The address of the code string below confuses VerifyAdapterSharing
  5215     // because it may differ between otherwise equivalent adapters.
 5216     return;
 5217   }
 5218 
 5219   push(rscratch1);
 5220   push(rax); // save rax,
 5221   // addr may contain rsp so we will have to adjust it based on the push
 5222   // we just did (and on 64 bit we do two pushes)
 5223   // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which
 5224   // stores rax into addr which is backwards of what was intended.
 5225   if (addr.uses(rsp)) {
 5226     lea(rax, addr);
 5227     pushptr(Address(rax, 2 * BytesPerWord));
 5228   } else {
 5229     pushptr(addr);
 5230   }
 5231 
 5232   // Pass register number to verify_oop_subroutine
 5233   const char* b = nullptr;
 5234   {
 5235     ResourceMark rm;
 5236     stringStream ss;
 5237     ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);

 5591 
 5592 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
 5593   // get mirror
 5594   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 5595   load_method_holder(mirror, method);
 5596   movptr(mirror, Address(mirror, mirror_offset));
 5597   resolve_oop_handle(mirror, tmp);
 5598 }
 5599 
 5600 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
 5601   load_method_holder(rresult, rmethod);
 5602   movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
 5603 }
 5604 
 5605 void MacroAssembler::load_method_holder(Register holder, Register method) {
 5606   movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
 5607   movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
 5608   movptr(holder, Address(holder, ConstantPool::pool_holder_offset()));          // InstanceKlass*
 5609 }
 5610 
 5611 void MacroAssembler::load_metadata(Register dst, Register src) {
 5612   if (UseCompactObjectHeaders) {
 5613     load_narrow_klass_compact(dst, src);
 5614   } else if (UseCompressedClassPointers) {
 5615     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5616   } else {
 5617     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5618   }
 5619 }
 5620 
 5621 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
 5622   assert(UseCompactObjectHeaders, "expect compact object headers");
 5623   movq(dst, Address(src, oopDesc::mark_offset_in_bytes()));
 5624   shrq(dst, markWord::klass_shift);
 5625 }
 5626 
 5627 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
 5628   assert_different_registers(src, tmp);
 5629   assert_different_registers(dst, tmp);
 5630 
 5631   if (UseCompactObjectHeaders) {
 5632     load_narrow_klass_compact(dst, src);
 5633     decode_klass_not_null(dst, tmp);
 5634   } else if (UseCompressedClassPointers) {
 5635     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5636     decode_klass_not_null(dst, tmp);
 5637   } else {
 5638     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5639   }
 5640 }
 5641 
 5642 void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) {
 5643   load_klass(dst, src, tmp);
 5644   movptr(dst, Address(dst, Klass::prototype_header_offset()));
 5645 }
 5646 
 5647 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
 5648   assert(!UseCompactObjectHeaders, "not with compact headers");
 5649   assert_different_registers(src, tmp);
 5650   assert_different_registers(dst, tmp);
 5651   if (UseCompressedClassPointers) {
 5652     encode_klass_not_null(src, tmp);
 5653     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5654   } else {
 5655     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5656   }
 5657 }
 5658 
 5659 void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
 5660   if (UseCompactObjectHeaders) {
 5661     assert(tmp != noreg, "need tmp");
 5662     assert_different_registers(klass, obj, tmp);
 5663     load_narrow_klass_compact(tmp, obj);
 5664     cmpl(klass, tmp);
 5665   } else if (UseCompressedClassPointers) {
 5666     cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes()));

 5692   bool as_raw = (decorators & AS_RAW) != 0;
 5693   if (as_raw) {
 5694     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1);
 5695   } else {
 5696     bs->load_at(this, decorators, type, dst, src, tmp1);
 5697   }
 5698 }
 5699 
 5700 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
 5701                                      Register tmp1, Register tmp2, Register tmp3) {
 5702   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5703   decorators = AccessInternal::decorator_fixup(decorators, type);
 5704   bool as_raw = (decorators & AS_RAW) != 0;
 5705   if (as_raw) {
 5706     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5707   } else {
 5708     bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5709   }
 5710 }
 5711 
 5712 void MacroAssembler::flat_field_copy(DecoratorSet decorators, Register src, Register dst,
 5713                                      Register inline_layout_info) {
 5714   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5715   bs->flat_field_copy(this, decorators, src, dst, inline_layout_info);
 5716 }
 5717 
 5718 void MacroAssembler::payload_offset(Register inline_klass, Register offset) {
 5719   movptr(offset, Address(inline_klass, InlineKlass::adr_members_offset()));
 5720   movl(offset, Address(offset, InlineKlass::payload_offset_offset()));
 5721 }
 5722 
 5723 void MacroAssembler::payload_addr(Register oop, Register data, Register inline_klass) {
 5724   // ((address) (void*) o) + vk->payload_offset();
 5725   Register offset = (data == oop) ? rscratch1 : data;
 5726   payload_offset(inline_klass, offset);
 5727   if (data == oop) {
 5728     addptr(data, offset);
 5729   } else {
 5730     lea(data, Address(oop, offset));
 5731   }
 5732 }
 5733 
 5734 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass,
 5735                                                 Register index, Register data) {
 5736   assert(index != rcx, "index needs to shift by rcx");
 5737   assert_different_registers(array, array_klass, index);
 5738   assert_different_registers(rcx, array, index);
 5739 
 5740   // array->base() + (index << Klass::layout_helper_log2_element_size(lh));
 5741   movl(rcx, Address(array_klass, Klass::layout_helper_offset()));
 5742 
 5743   // Klass::layout_helper_log2_element_size(lh)
 5744   // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
 5745   shrl(rcx, Klass::_lh_log2_element_size_shift);
 5746   andl(rcx, Klass::_lh_log2_element_size_mask);
 5747   shlptr(index); // index << rcx
 5748 
 5749   lea(data, Address(array, index, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT)));
 5750 }
 5751 
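The element address is base + header + (index << log2_element_size), with the log2 size unpacked from the klass layout helper; the rcx pinning exists because x86 variable shifts take their count in CL. A sketch of the address computation:

    #include <cstddef>
    #include <cstdint>

    // log2_shift/log2_mask stand in for Klass::_lh_log2_element_size_shift
    // and Klass::_lh_log2_element_size_mask.
    static char* flat_element_addr_sketch(char* array_base, size_t index,
                                          uint32_t layout_helper,
                                          unsigned log2_shift, uint32_t log2_mask,
                                          size_t header_bytes) {
      unsigned log2_elem = (layout_helper >> log2_shift) & log2_mask;
      return array_base + header_bytes + (index << log2_elem);
    }
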
 5752 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
 5753   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1);
 5754 }
 5755 
 5756 // Doesn't do verification, generates fixed size code
 5757 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
 5758   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1);
 5759 }
 5760 
 5761 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
 5762                                     Register tmp2, Register tmp3, DecoratorSet decorators) {
 5763   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
 5764 }
 5765 
 5766 // Used for storing nulls.
 5767 void MacroAssembler::store_heap_oop_null(Address dst) {
 5768   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
 5769 }
 5770 
 5771 void MacroAssembler::store_klass_gap(Register dst, Register src) {

 6088   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 6089   int klass_index = oop_recorder()->find_index(k);
 6090   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
 6091   Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 6092 }
 6093 
 6094 void MacroAssembler::reinit_heapbase() {
 6095   if (UseCompressedOops) {
 6096     if (Universe::heap() != nullptr) {
 6097       if (CompressedOops::base() == nullptr) {
 6098         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
 6099       } else {
 6100         mov64(r12_heapbase, (int64_t)CompressedOops::base());
 6101       }
 6102     } else {
 6103       movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr()));
 6104     }
 6105   }
 6106 }
 6107 
 6108 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
 6109   assert(InlineTypeReturnedAsFields, "Inline types should only be returned as fields if enabled");
 6110   // An inline type might be returned. If fields are in registers we
 6111   // need to allocate an inline type instance and initialize it with
 6112   // the value of the fields.
 6113   Label skip;
 6114   // We only need to allocate a new buffered inline type if one was not already returned
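        // The low bit of RAX tags the return value: if it is clear, RAX already
        // holds a buffered oop and the allocation below can be skipped.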
 6115   testptr(rax, 1);
 6116   jcc(Assembler::zero, skip);
 6117   int call_offset = -1;
 6118 
 6119 #ifdef _LP64
 6120   // The following code is similar to allocate_instance but has some slight differences,
 6121   // e.g. the object size is never zero and is sometimes a compile-time constant, and storing
 6122   // the klass pointer after allocation is unnecessary if vk != nullptr. allocate_instance is not aware of these.
 6123   Label slow_case;
 6124   // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
 6125   mov(rscratch1, rax); // save rax for slow_case since *_allocate may corrupt it when allocation fails
 6126   if (vk != nullptr) {
 6127     // Called from C1, where the return type is statically known.
 6128     movptr(rbx, (intptr_t)vk->get_InlineKlass());
 6129     jint lh = vk->layout_helper();
 6130     assert(lh != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
 6131     if (UseTLAB && !Klass::layout_helper_needs_slow_path(lh)) {
 6132       tlab_allocate(rax, noreg, lh, r13, r14, slow_case);
 6133     } else {
 6134       jmp(slow_case);
 6135     }
 6136   } else {
 6137     // Call from interpreter. RAX contains ((the InlineKlass* of the return type) | 0x01)
 6138     mov(rbx, rax);
 6139     andptr(rbx, -2);
 6140     if (UseTLAB) {
 6141       movl(r14, Address(rbx, Klass::layout_helper_offset()));
 6142       testl(r14, Klass::_lh_instance_slow_path_bit);
 6143       jcc(Assembler::notZero, slow_case);
 6144       tlab_allocate(rax, r14, 0, r13, r14, slow_case);
 6145     } else {
 6146       jmp(slow_case);
 6147     }
 6148   }
 6149   if (UseTLAB) {
 6150     // 2. Initialize buffered inline instance header
 6151     Register buffer_obj = rax;
 6152     Register klass = rbx;
 6153     if (UseCompactObjectHeaders) {
 6154       Register mark_word = r13;
 6155       movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
 6156       movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), mark_word);
 6157     } else {
 6158       movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), (intptr_t)markWord::inline_type_prototype().value());
 6159       xorl(r13, r13);
 6160       store_klass_gap(buffer_obj, r13);
 6161       if (vk == nullptr) {
 6162         // store_klass corrupts rbx(klass), so save it in r13 for later use (interpreter case only).
 6163         mov(r13, klass);
 6164       }
 6165       store_klass(buffer_obj, klass, rscratch1);
 6166       klass = r13;
 6167     }
 6168     // 3. Initialize its fields with an inline class specific handler
 6169     if (vk != nullptr) {
 6170       call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
 6171     } else {
 6172       movptr(rbx, Address(klass, InlineKlass::adr_members_offset()));
 6173       movptr(rbx, Address(rbx, InlineKlass::pack_handler_offset()));
 6174       call(rbx);
 6175     }
 6176     jmp(skip);
 6177   }
 6178   bind(slow_case);
 6179   // We failed to allocate a new inline type, fall back to a runtime
 6180   // call. Some oop fields may be live in registers but we can't
 6181   // tell. That runtime call will take care of preserving them
 6182   // across a GC if there's one.
 6183   mov(rax, rscratch1);
 6184 #endif
 6185 
 6186   if (from_interpreter) {
 6187     super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
 6188   } else {
 6189     call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
 6190     call_offset = offset();
 6191   }
 6192 
 6193   bind(skip);
 6194   return call_offset;
 6195 }
 6196 
 6197 // Move a value between registers/stack slots and update the reg_state
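      // reg_state tracks each location: reg_readonly (still holds an unread source
      // value and must not be overwritten yet), reg_writable (free to overwrite)
      // and reg_written (already holds its final value).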
 6198 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
 6199   assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
 6200   if (reg_state[to->value()] == reg_written) {
 6201     return true; // Already written
 6202   }
 6203   if (from != to && bt != T_VOID) {
 6204     if (reg_state[to->value()] == reg_readonly) {
 6205       return false; // Not yet writable
 6206     }
 6207     if (from->is_reg()) {
 6208       if (to->is_reg()) {
 6209         if (from->is_XMMRegister()) {
 6210           if (bt == T_DOUBLE) {
 6211             movdbl(to->as_XMMRegister(), from->as_XMMRegister());
 6212           } else {
 6213             assert(bt == T_FLOAT, "must be float");
 6214             movflt(to->as_XMMRegister(), from->as_XMMRegister());
 6215           }
 6216         } else {
 6217           movq(to->as_Register(), from->as_Register());
 6218         }
 6219       } else {
 6220         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6221         Address to_addr = Address(rsp, st_off);
 6222         if (from->is_XMMRegister()) {
 6223           if (bt == T_DOUBLE) {
 6224             movdbl(to_addr, from->as_XMMRegister());
 6225           } else {
 6226             assert(bt == T_FLOAT, "must be float");
 6227             movflt(to_addr, from->as_XMMRegister());
 6228           }
 6229         } else {
 6230           movq(to_addr, from->as_Register());
 6231         }
 6232       }
 6233     } else {
 6234       Address from_addr = Address(rsp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize);
 6235       if (to->is_reg()) {
 6236         if (to->is_XMMRegister()) {
 6237           if (bt == T_DOUBLE) {
 6238             movdbl(to->as_XMMRegister(), from_addr);
 6239           } else {
 6240             assert(bt == T_FLOAT, "must be float");
 6241             movflt(to->as_XMMRegister(), from_addr);
 6242           }
 6243         } else {
 6244           movq(to->as_Register(), from_addr);
 6245         }
 6246       } else {
 6247         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6248         movq(r13, from_addr);
 6249         movq(Address(rsp, st_off), r13);
 6250       }
 6251     }
 6252   }
 6253   // Update register states
 6254   reg_state[from->value()] = reg_writable;
 6255   reg_state[to->value()] = reg_written;
 6256   return true;
 6257 }
 6258 
 6259 // Calculate the extra stack space required for packing or unpacking inline
 6260 // args and adjust the stack pointer.
 6261 //
 6262 // This extra stack space takes into account the copy #2 of the return address,
 6263 // but NOT the saved RBP or the normal size of the frame (see MacroAssembler::remove_frame
 6264 // for notations).
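      // For example, with args_on_stack == 3 and 4-byte stack slots this reserves
      // (3 + 2) * 4 = 20 bytes, rounded up to 32 by the 16-byte stack alignment.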
 6265 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
 6266   // Two additional slots to account for the return address
 6267   int sp_inc = (args_on_stack + 2) * VMRegImpl::stack_slot_size;
 6268   sp_inc = align_up(sp_inc, StackAlignmentInBytes);
 6269   // Save the return address, adjust the stack (make sure it is properly
 6270   // 16-byte aligned) and copy the return address to the new top of the stack.
 6271   // The stack will be repaired on return (see MacroAssembler::remove_frame).
 6272   assert(sp_inc > 0, "sanity");
 6273   pop(r13);
 6274   subptr(rsp, sp_inc);
 6275 #ifdef ASSERT
 6276   movl(Address(rsp, -VMRegImpl::stack_slot_size), badRegWordVal);
 6277   movl(Address(rsp, -2 * VMRegImpl::stack_slot_size), badRegWordVal);
 6278   subptr(rsp, 2 * VMRegImpl::stack_slot_size);
 6279 #else
 6280   push(r13);
 6281 #endif
 6282   return sp_inc;
 6283 }
 6284 
 6285 // Read all fields from an inline type buffer and store the field values in registers/stack slots.
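      // This helper may be called repeatedly: it returns true only once all
      // destination registers/slots have been written, and merely makes partial
      // progress while some destinations are still read-only.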
 6286 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
 6287                                           VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
 6288                                           RegState reg_state[]) {
 6289   assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
 6290   assert(from->is_valid(), "source must be valid");
 6291   bool progress = false;
 6292 #ifdef ASSERT
 6293   const int start_offset = offset();
 6294 #endif
 6295 
 6296   Label L_null, L_notNull;
 6297   // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
 6298   Register tmp1 = r10;
 6299   Register tmp2 = r13;
 6300   Register fromReg = noreg;
 6301   ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, true);
 6302   bool done = true;
 6303   bool mark_done = true;
 6304   VMReg toReg;
 6305   BasicType bt;
 6306   // Check if argument requires a null check
 6307   bool null_check = false;
 6308   VMReg nullCheckReg;
 6309   while (stream.next(nullCheckReg, bt)) {
 6310     if (sig->at(stream.sig_index())._offset == -1) {
 6311       null_check = true;
 6312       break;
 6313     }
 6314   }
 6315   stream.reset(sig_index, to_index);
 6316   while (stream.next(toReg, bt)) {
 6317     assert(toReg->is_valid(), "destination must be valid");
 6318     int idx = (int)toReg->value();
 6319     if (reg_state[idx] == reg_readonly) {
 6320       if (idx != from->value()) {
 6321         mark_done = false;
 6322       }
 6323       done = false;
 6324       continue;
 6325     } else if (reg_state[idx] == reg_written) {
 6326       continue;
 6327     }
 6328     assert(reg_state[idx] == reg_writable, "must be writable");
 6329     reg_state[idx] = reg_written;
 6330     progress = true;
 6331 
 6332     if (fromReg == noreg) {
 6333       if (from->is_reg()) {
 6334         fromReg = from->as_Register();
 6335       } else {
 6336         int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6337         movq(tmp1, Address(rsp, st_off));
 6338         fromReg = tmp1;
 6339       }
 6340       if (null_check) {
 6341         // Nullable inline type argument, emit null check
 6342         testptr(fromReg, fromReg);
 6343         jcc(Assembler::zero, L_null);
 6344       }
 6345     }
 6346     int off = sig->at(stream.sig_index())._offset;
 6347     if (off == -1) {
 6348       assert(null_check, "Missing null check");
 6349       if (toReg->is_stack()) {
 6350         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6351         movq(Address(rsp, st_off), 1);
 6352       } else {
 6353         movq(toReg->as_Register(), 1);
 6354       }
 6355       continue;
 6356     }
 6357     assert(off > 0, "offset in object should be positive");
 6358     Address fromAddr = Address(fromReg, off);
 6359     if (!toReg->is_XMMRegister()) {
 6360       Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
 6361       if (is_reference_type(bt)) {
 6362         load_heap_oop(dst, fromAddr);
 6363       } else {
 6364         bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
 6365         load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
 6366       }
 6367       if (toReg->is_stack()) {
 6368         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6369         movq(Address(rsp, st_off), dst);
 6370       }
 6371     } else if (bt == T_DOUBLE) {
 6372       movdbl(toReg->as_XMMRegister(), fromAddr);
 6373     } else {
 6374       assert(bt == T_FLOAT, "must be float");
 6375       movflt(toReg->as_XMMRegister(), fromAddr);
 6376     }
 6377   }
 6378   if (progress && null_check) {
 6379     if (done) {
 6380       jmp(L_notNull);
 6381       bind(L_null);
 6382       // Set null marker to zero to signal that the argument is null.
 6383       // Also set all oop fields to zero to make the GC happy.
 6384       stream.reset(sig_index, to_index);
 6385       while (stream.next(toReg, bt)) {
 6386         if (sig->at(stream.sig_index())._offset == -1 ||
 6387             bt == T_OBJECT || bt == T_ARRAY) {
 6388           if (toReg->is_stack()) {
 6389             int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6390             movq(Address(rsp, st_off), 0);
 6391           } else {
 6392             xorq(toReg->as_Register(), toReg->as_Register());
 6393           }
 6394         }
 6395       }
 6396       bind(L_notNull);
 6397     } else {
 6398       bind(L_null);
 6399     }
 6400   }
 6401 
 6402   sig_index = stream.sig_index();
 6403   to_index = stream.regs_index();
 6404 
 6405   if (mark_done && reg_state[from->value()] != reg_written) {
 6406     // This is okay because no one else will write to that slot
 6407     reg_state[from->value()] = reg_writable;
 6408   }
 6409   from_index--;
 6410   assert(progress || (start_offset == offset()), "should not emit code");
 6411   return done;
 6412 }
 6413 
 6414 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
 6415                                         VMRegPair* from, int from_count, int& from_index, VMReg to,
 6416                                         RegState reg_state[], Register val_array) {
 6417   assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter");
 6418   assert(to->is_valid(), "destination must be valid");
 6419 
 6420   if (reg_state[to->value()] == reg_written) {
 6421     skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
 6422     return true; // Already written
 6423   }
 6424 
 6425   // TODO 8284443 Isn't it an issue if the code below uses r14 as tmp while it contains a spilled value?
 6426   // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
 6427   Register val_obj_tmp = r11;
 6428   Register from_reg_tmp = r14;
 6429   Register tmp1 = r10;
 6430   Register tmp2 = r13;
 6431   Register tmp3 = rbx;
 6432   Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();
 6433 
 6434   assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);
 6435 
 6436   if (reg_state[to->value()] == reg_readonly) {
 6437     if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
 6438       skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
 6439       return false; // Not yet writable
 6440     }
 6441     val_obj = val_obj_tmp;
 6442   }
 6443 
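        // Fetch the value object for this argument from val_array; its fields are
        // filled in from the scalarized argument values below.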
 6444   int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT);
 6445   load_heap_oop(val_obj, Address(val_array, index));
 6446 
 6447   ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
 6448   VMReg fromReg;
 6449   BasicType bt;
 6450   Label L_null;
 6451   while (stream.next(fromReg, bt)) {
 6452     assert(fromReg->is_valid(), "source must be valid");
 6453     reg_state[fromReg->value()] = reg_writable;
 6454 
 6455     int off = sig->at(stream.sig_index())._offset;
 6456     if (off == -1) {
 6457       // Nullable inline type argument, emit null check
 6458       Label L_notNull;
 6459       if (fromReg->is_stack()) {
 6460         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6461         testb(Address(rsp, ld_off), 1);
 6462       } else {
 6463         testb(fromReg->as_Register(), 1);
 6464       }
 6465       jcc(Assembler::notZero, L_notNull);
 6466       movptr(val_obj, 0);
 6467       jmp(L_null);
 6468       bind(L_notNull);
 6469       continue;
 6470     }
 6471 
 6472     assert(off > 0, "offset in object should be positive");
 6473     size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
 6474 
 6475     // Pack the scalarized field into the value object.
 6476     Address dst(val_obj, off);
 6477     if (!fromReg->is_XMMRegister()) {
 6478       Register src;
 6479       if (fromReg->is_stack()) {
 6480         src = from_reg_tmp;
 6481         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6482         load_sized_value(src, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
 6483       } else {
 6484         src = fromReg->as_Register();
 6485       }
 6486       assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
 6487       if (is_reference_type(bt)) {
 6488         // store_heap_oop transitively calls oop_store_at, which corrupts dst.base(). We need to keep val_obj valid.
 6489         mov(tmp3, val_obj);
 6490         Address dst_with_tmp3(tmp3, off);
 6491         store_heap_oop(dst_with_tmp3, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
 6492       } else {
 6493         store_sized_value(dst, src, size_in_bytes);
 6494       }
 6495     } else if (bt == T_DOUBLE) {
 6496       movdbl(dst, fromReg->as_XMMRegister());
 6497     } else {
 6498       assert(bt == T_FLOAT, "must be float");
 6499       movflt(dst, fromReg->as_XMMRegister());
 6500     }
 6501   }
 6502   bind(L_null);
 6503   sig_index = stream.sig_index();
 6504   from_index = stream.regs_index();
 6505 
 6506   assert(reg_state[to->value()] == reg_writable, "must have already been read");
 6507   bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
 6508   assert(success, "to register must be writeable");
 6509   return true;
 6510 }
 6511 
 6512 VMReg MacroAssembler::spill_reg_for(VMReg reg) {
 6513   return reg->is_XMMRegister() ? xmm8->as_VMReg() : r14->as_VMReg();
 6514 }
 6515 
 6516 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
 6517   assert((initial_framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
 6518   if (needs_stack_repair) {
 6519     // The method has a scalarized entry point (where fields of value object arguments
 6520     // are passed through registers and stack), and a non-scalarized entry point (where
 6521     // value object arguments are given as oops). The non-scalarized entry point will
 6522     // first load each field of value object arguments and store them in registers and on
 6523     // the stack in a way compatible with the scalarized entry point. To do so, some extra
 6524     // stack space might be reserved (if argument registers are not enough). On leaving the
 6525     // method, this space must be freed.
 6526     //
 6527     // In case we used the non-scalarized entry point the stack looks like this:
 6528     //
 6529     // | Arguments from caller     |
 6530     // |---------------------------|  <-- caller's SP
 6531     // | Return address #1         |
 6532     // |---------------------------|
 6533     // | Extension space for       |
 6534     // |   inline arg (un)packing  |
 6535     // |---------------------------|
 6536     // | Return address #2         |
 6537     // | Saved RBP                 |
 6538     // |---------------------------|  <-- start of this method's frame
 6539     // | sp_inc                    |
 6540     // | method locals             |
 6541     // |---------------------------|  <-- SP
 6542     //
 6543     // There are two copies of the return address on the stack. They will be identical at
 6544     // first, but that can change.
 6545     // If the caller has been deoptimized, the copy #1 will be patched to point at the
 6546     // deopt blob, and the copy #2 will still point into the old method. In short,
 6547     // the copy #2 is not reliable and should not be used. It is mostly needed to
 6548     // add space between the extension space and the locals, as there would be between
 6549     // the real arguments and the locals if we don't need to do unpacking (from the
 6550     // scalarized entry point).
 6551     //
 6552     // When leaving, one must use the copy #1 of the return address, while keeping in mind
 6553     // that from the scalarized entry point, there will be only one copy. Indeed, in the
 6554     // case we used the scalarized calling convention, the stack looks like this:
 6555     //
 6556     // | Arguments from caller     |
 6557     // |---------------------------|  <-- caller's SP
 6558     // | Return address            |
 6559     // | Saved RBP                 |
 6560     // |---------------------------|  <-- start of this method's frame
 6561     // | sp_inc                    |
 6562     // | method locals             |
 6563     // |---------------------------|  <-- SP
 6564     //
 6565     // The sp_inc stack slot holds the total size of the frame, including the extension
 6566     // space, the possible copy #2 of the return address, and the saved RBP (but never the
 6567     // copy #1 of the return address). That is how to find the copy #1 of the return address.
 6568     // This size is expressed in bytes. Be careful when using it from C++ in pointer arithmetic;
 6569     // you might need to divide it by wordSize.
 6570     //
 6571     // One can find sp_inc since the start of the method's frame is SP + initial_framesize.
 6572 
 6573     movq(rbp, Address(rsp, initial_framesize));
 6574     // The stack increment resides just below the saved rbp
 6575     addq(rsp, Address(rsp, initial_framesize - wordSize));
 6576   } else {
 6577     if (initial_framesize > 0) {
 6578       addq(rsp, initial_framesize);
 6579     }
 6580     pop(rbp);
 6581   }
 6582 }
 6583 
 6584 #if COMPILER2_OR_JVMCI
 6585 
 6586 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers
 6587 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, KRegister mask) {
 6588   // cnt - number of qwords (8-byte words).
 6589   // base - start address, qword aligned.
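        // Strategy: broadcast val into xtmp, clear in 64-byte chunks, then handle
        // the remaining tail with 32-byte, masked (AVX-512) or 8-byte stores.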
 6590   Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
 6591   bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
 6592   if (use64byteVector) {
 6593     evpbroadcastq(xtmp, val, AVX_512bit);
 6594   } else if (MaxVectorSize >= 32) {
 6595     movdq(xtmp, val);
 6596     punpcklqdq(xtmp, xtmp);
 6597     vinserti128_high(xtmp, xtmp);
 6598   } else {
 6599     movdq(xtmp, val);
 6600     punpcklqdq(xtmp, xtmp);
 6601   }
 6602   jmp(L_zero_64_bytes);
 6603 
 6604   BIND(L_loop);
 6605   if (MaxVectorSize >= 32) {
 6606     fill64(base, 0, xtmp, use64byteVector);
 6607   } else {
 6608     movdqu(Address(base,  0), xtmp);
 6609     movdqu(Address(base, 16), xtmp);
 6610     movdqu(Address(base, 32), xtmp);
 6611     movdqu(Address(base, 48), xtmp);
 6612   }
 6613   addptr(base, 64);
 6614 
 6615   BIND(L_zero_64_bytes);
 6616   subptr(cnt, 8);
 6617   jccb(Assembler::greaterEqual, L_loop);
 6618 
 6619   // Copy trailing 64 bytes
 6620   if (use64byteVector) {
 6621     addptr(cnt, 8);
 6622     jccb(Assembler::equal, L_end);
 6623     fill64_masked(3, base, 0, xtmp, mask, cnt, val, true);
 6624     jmp(L_end);
 6625   } else {
 6626     addptr(cnt, 4);
 6627     jccb(Assembler::less, L_tail);
 6628     if (MaxVectorSize >= 32) {
 6629       vmovdqu(Address(base, 0), xtmp);
 6630     } else {
 6631       movdqu(Address(base,  0), xtmp);
 6632       movdqu(Address(base, 16), xtmp);
 6633     }
 6634   }
 6635   addptr(base, 32);
 6636   subptr(cnt, 4);
 6637 
 6638   BIND(L_tail);
 6639   addptr(cnt, 4);
 6640   jccb(Assembler::lessEqual, L_end);
 6641   if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
 6642     fill32_masked(3, base, 0, xtmp, mask, cnt, val);
 6643   } else {
 6644     decrement(cnt);
 6645 
 6646     BIND(L_sloop);
 6647     movq(Address(base, 0), xtmp);
 6648     addptr(base, 8);
 6649     decrement(cnt);
 6650     jccb(Assembler::greaterEqual, L_sloop);
 6651   }
 6652   BIND(L_end);
 6653 }
 6654 
 6655 // Clearing constant-sized memory using YMM/ZMM registers.
 6656 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 6657   assert(UseAVX > 2 && VM_Version::supports_avx512vl(), "");
 6658   bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
 6659 
 6660   int vector64_count = (cnt & (~0x7)) >> 3;
 6661   cnt = cnt & 0x7;
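        // e.g. cnt == 19 qwords yields vector64_count == 2 (two 64-byte stores)
        // and a 3-qword tail handled by the cases below.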
 6662   const int fill64_per_loop = 4;

 6724         break;
 6725       case 7:
 6726         if (use64byteVector) {
 6727           movl(rtmp, 0x7F);
 6728           kmovwl(mask, rtmp);
 6729           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 6730         } else {
 6731           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 6732           movl(rtmp, 0x7);
 6733           kmovwl(mask, rtmp);
 6734           evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
 6735         }
 6736         break;
 6737       default:
 6738         fatal("Unexpected length: %d\n", cnt);
 6739         break;
 6740     }
 6741   }
 6742 }
 6743 
 6744 void MacroAssembler::clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp,
 6745                                bool is_large, bool word_copy_only, KRegister mask) {
 6746   // cnt      - number of qwords (8-byte words).
 6747   // base     - start address, qword aligned.
 6748   // is_large - if optimizers know cnt is larger than InitArrayShortSize
 6749   assert(base==rdi, "base register must be rdi for rep stos");
 6750   assert(val==rax,   "val register must be rax for rep stos");
 6751   assert(cnt==rcx,   "cnt register must be rcx for rep stos");
 6752   assert(InitArrayShortSize % BytesPerLong == 0,
 6753     "InitArrayShortSize should be a multiple of BytesPerLong");
 6754 
 6755   Label DONE;



 6756 
 6757   if (!is_large) {
 6758     Label LOOP, LONG;
 6759     cmpptr(cnt, InitArrayShortSize/BytesPerLong);
 6760     jccb(Assembler::greater, LONG);
 6761 
 6762     decrement(cnt);
 6763     jccb(Assembler::negative, DONE); // Zero length
 6764 
 6765     // Use individual pointer-sized stores for small counts:
 6766     BIND(LOOP);
 6767     movptr(Address(base, cnt, Address::times_ptr), val);
 6768     decrement(cnt);
 6769     jccb(Assembler::greaterEqual, LOOP);
 6770     jmpb(DONE);
 6771 
 6772     BIND(LONG);
 6773   }
 6774 
 6775   // Use longer rep-prefixed ops for non-small counts:
 6776   if (UseFastStosb && !word_copy_only) {
 6777     shlptr(cnt, 3); // convert to number of bytes
 6778     rep_stosb();
 6779   } else if (UseXMMForObjInit) {
 6780     xmm_clear_mem(base, cnt, val, xtmp, mask);
 6781   } else {
 6782     rep_stos();
 6783   }
 6784 
 6785   BIND(DONE);
 6786 }
 6787 
 6788 #endif //COMPILER2_OR_JVMCI
 6789 
 6790 
 6791 void MacroAssembler::generate_fill(BasicType t, bool aligned,
 6792                                    Register to, Register value, Register count,
 6793                                    Register rtmp, XMMRegister xtmp) {
 6794   ShortBranchVerifier sbv(this);
 6795   assert_different_registers(to, value, count, rtmp);
 6796   Label L_exit;
 6797   Label L_fill_2_bytes, L_fill_4_bytes;
 6798 
 6799 #if defined(COMPILER2)
 6800   if (MaxVectorSize >= 32 &&

10666 
10667   // Load top.
10668   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10669 
10670   // Check if the lock-stack is full.
10671   cmpl(top, LockStack::end_offset());
10672   jcc(Assembler::greaterEqual, slow);
10673 
10674   // Check for recursion.
10675   cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
10676   jcc(Assembler::equal, push);
10677 
10678   // Check header for monitor (0b10).
10679   testptr(reg_rax, markWord::monitor_value);
10680   jcc(Assembler::notZero, slow);
10681 
10682   // Try to lock. Transition lock bits 0b01 => 0b00
10683   movptr(tmp, reg_rax);
10684   andptr(tmp, ~(int32_t)markWord::unlocked_value);
10685   orptr(reg_rax, markWord::unlocked_value);
 10686   // Mask the inline_type bit so that we take the slow path if the object is an inline type
10687   andptr(reg_rax, ~((int) markWord::inline_type_bit_in_place));
10688 
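          // reg_rax holds the expected (unlocked) mark word and tmp the locked one
          // to install; the CAS fails if the mark changed or the inline_type bit is set.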
10689   lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
10690   jcc(Assembler::notEqual, slow);
10691 
 10692   // Restore top; the CAS sequence above clobbered the register.
10693   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10694 
10695   bind(push);
10696   // After successful lock, push object on lock-stack.
10697   movptr(Address(thread, top), obj);
10698   incrementl(top, oopSize);
10699   movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
10700 }
10701 
10702 // Implements fast-unlocking.
10703 //
10704 // obj: the object to be unlocked
10705 // reg_rax: rax
10706 // thread: the thread
10707 // tmp: a temporary register
10708 void MacroAssembler::fast_unlock(Register obj, Register reg_rax, Register tmp, Label& slow) {