10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/assembler.hpp"
26 #include "asm/assembler.inline.hpp"
27 #include "code/compiledIC.hpp"
28 #include "compiler/compiler_globals.hpp"
29 #include "compiler/disassembler.hpp"
30 #include "crc32c.h"
31 #include "gc/shared/barrierSet.hpp"
32 #include "gc/shared/barrierSetAssembler.hpp"
33 #include "gc/shared/collectedHeap.inline.hpp"
34 #include "gc/shared/tlab_globals.hpp"
35 #include "interpreter/bytecodeHistogram.hpp"
36 #include "interpreter/interpreter.hpp"
37 #include "interpreter/interpreterRuntime.hpp"
38 #include "jvm.h"
39 #include "memory/resourceArea.hpp"
40 #include "memory/universe.hpp"
41 #include "oops/accessDecorators.hpp"
42 #include "oops/compressedKlass.inline.hpp"
43 #include "oops/compressedOops.inline.hpp"
44 #include "oops/klass.inline.hpp"
45 #include "prims/methodHandles.hpp"
46 #include "runtime/continuation.hpp"
47 #include "runtime/interfaceSupport.inline.hpp"
48 #include "runtime/javaThread.hpp"
49 #include "runtime/jniHandles.hpp"
50 #include "runtime/objectMonitor.hpp"
51 #include "runtime/os.hpp"
52 #include "runtime/safepoint.hpp"
53 #include "runtime/safepointMechanism.hpp"
54 #include "runtime/sharedRuntime.hpp"
55 #include "runtime/stubRoutines.hpp"
56 #include "utilities/checkedCast.hpp"
57 #include "utilities/macros.hpp"
58
59 #ifdef PRODUCT
60 #define BLOCK_COMMENT(str) /* nothing */
61 #define STOP(error) stop(error)
62 #else
63 #define BLOCK_COMMENT(str) block_comment(str)
64 #define STOP(error) block_comment(error); stop(error)
65 #endif
66
67 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
68
69 #ifdef ASSERT
70 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
71 #endif
72
73 static const Assembler::Condition reverse[] = {
74 Assembler::noOverflow /* overflow = 0x0 */ ,
75 Assembler::overflow /* noOverflow = 0x1 */ ,
76 Assembler::aboveEqual /* carrySet = 0x2, below = 0x2 */ ,
77 Assembler::below /* aboveEqual = 0x3, carryClear = 0x3 */ ,
1719 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1720 LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
1721 LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
1722 pass_arg2(this, arg_2);
1723 pass_arg1(this, arg_1);
1724 pass_arg0(this, arg_0);
1725 call_VM_leaf(entry_point, 3);
1726 }
1727
1728 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
1729 LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3));
1730 LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
1731 LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
1732 pass_arg3(this, arg_3);
1733 pass_arg2(this, arg_2);
1734 pass_arg1(this, arg_1);
1735 pass_arg0(this, arg_0);
1736   call_VM_leaf(entry_point, 4);
1737 }
1738
1739 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
1740 pass_arg0(this, arg_0);
1741 MacroAssembler::call_VM_leaf_base(entry_point, 1);
1742 }
1743
1744 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1745 LP64_ONLY(assert_different_registers(arg_0, c_rarg1));
1746 pass_arg1(this, arg_1);
1747 pass_arg0(this, arg_0);
1748 MacroAssembler::call_VM_leaf_base(entry_point, 2);
1749 }
1750
1751 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1752 LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
1753 LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
1754 pass_arg2(this, arg_2);
1755 pass_arg1(this, arg_1);
1756 pass_arg0(this, arg_0);
1757 MacroAssembler::call_VM_leaf_base(entry_point, 3);
1758 }
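// A note on the assert_different_registers calls above: arguments are
// marshalled from last to first (pass_arg2, then pass_arg1, then pass_arg0),
// and on LP64 each pass_arg_n writes c_rarg_n. A source register must
// therefore not alias a c_rarg register that gets written before it is
// read; e.g. if arg_0 lived in c_rarg2, pass_arg2 would destroy it before
// pass_arg0 could copy it into c_rarg0.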
2996 lea(rscratch, src);
2997 Assembler::mulss(dst, Address(rscratch, 0));
2998 }
2999 }
3000
3001 void MacroAssembler::null_check(Register reg, int offset) {
3002 if (needs_explicit_null_check(offset)) {
3003 // provoke OS null exception if reg is null by
3004 // accessing M[reg] w/o changing any (non-CC) registers
3005 // NOTE: cmpl is plenty here to provoke a segv
3006 cmpptr(rax, Address(reg, 0));
3007 // Note: should probably use testl(rax, Address(reg, 0));
3008 // may be shorter code (however, this version of
3009 // testl needs to be implemented first)
3010 } else {
3011 // nothing to do, (later) access of M[reg + offset]
3012 // will provoke OS null exception if reg is null
3013 }
3014 }
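// Roughly, the implicit variant relies on the unmapped page at address
// zero: for small offsets M[reg + offset] still lands inside the
// protected page when reg is null, so the hardware fault itself is the
// null check. Only offsets large enough to escape the protected region
// (which is what needs_explicit_null_check() decides) require the
// explicit probe above.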
3015
3016 void MacroAssembler::os_breakpoint() {
3017   // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
3018 // (e.g., MSVC can't call ps() otherwise)
3019 call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
3020 }
3021
3022 void MacroAssembler::unimplemented(const char* what) {
3023 const char* buf = nullptr;
3024 {
3025 ResourceMark rm;
3026 stringStream ss;
3027 ss.print("unimplemented: %s", what);
3028 buf = code_string(ss.as_string());
3029 }
3030 stop(buf);
3031 }
3032
3033 #ifdef _LP64
3034 #define XSTATE_BV 0x200
3035 #endif
4183 }
4184
4185 // C++ bool manipulation
4186 void MacroAssembler::testbool(Register dst) {
4187   if (sizeof(bool) == 1)
4188     testb(dst, 0xff);
4189   else if (sizeof(bool) == 2) {
4190     // testw implementation needed for two byte bools
4191     ShouldNotReachHere();
4192   } else if (sizeof(bool) == 4)
4193     testl(dst, dst);
4194   else
4195     // unsupported
4196     ShouldNotReachHere();
4197 }
4198
4199 void MacroAssembler::testptr(Register dst, Register src) {
4200 LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
4201 }
4202
4203 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
4204 void MacroAssembler::tlab_allocate(Register thread, Register obj,
4205 Register var_size_in_bytes,
4206 int con_size_in_bytes,
4207 Register t1,
4208 Register t2,
4209 Label& slow_case) {
4210 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
4211 bs->tlab_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
4212 }
4213
4214 RegSet MacroAssembler::call_clobbered_gp_registers() {
4215 RegSet regs;
4216 #ifdef _LP64
4217 regs += RegSet::of(rax, rcx, rdx);
4218 #ifndef _WINDOWS
4219 regs += RegSet::of(rsi, rdi);
4220 #endif
4221 regs += RegSet::range(r8, r11);
4222 #else
4441 // clear topmost word (no jump would be needed if conditional assignment worked here)
4442 movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp);
4443 // index could be 0 now, must check again
4444 jcc(Assembler::zero, done);
4445 bind(even);
4446 }
4447 #endif // !_LP64
4448 // initialize remaining object fields: index is a multiple of 2 now
4449 {
4450 Label loop;
4451 bind(loop);
4452 movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
4453 NOT_LP64(movptr(Address(address, index, Address::times_8, offset_in_bytes - 2*BytesPerWord), temp);)
4454 decrement(index);
4455 jcc(Assembler::notZero, loop);
4456 }
4457
4458 bind(done);
4459 }
4460
4461 // Look up the method for a megamorphic invokeinterface call.
4462 // The target method is determined by <intf_klass, itable_index>.
4463 // The receiver klass is in recv_klass.
4464 // On success, the result will be in method_result, and execution falls through.
4465 // On failure, execution transfers to the given label.
4466 void MacroAssembler::lookup_interface_method(Register recv_klass,
4467 Register intf_klass,
4468 RegisterOrConstant itable_index,
4469 Register method_result,
4470 Register scan_temp,
4471 Label& L_no_such_interface,
4472 bool return_method) {
4473 assert_different_registers(recv_klass, intf_klass, scan_temp);
4474 assert_different_registers(method_result, intf_klass, scan_temp);
4475 assert(recv_klass != method_result || !return_method,
4476 "recv_klass can be destroyed when method isn't needed");
4477
4478 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
4479 "caller must use same register for non-constant itable index as for method");
4480
5510 } else {
5511 Label L;
5512 jccb(negate_condition(cc), L);
5513 movl(dst, src);
5514 bind(L);
5515 }
5516 }
5517
5518 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
5519 if (VM_Version::supports_cmov()) {
5520 cmovl(cc, dst, src);
5521 } else {
5522 Label L;
5523 jccb(negate_condition(cc), L);
5524 movl(dst, src);
5525 bind(L);
5526 }
5527 }
5528
5529 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
5530 if (!VerifyOops) return;
5531
5532 BLOCK_COMMENT("verify_oop {");
5533 #ifdef _LP64
5534 push(rscratch1);
5535 #endif
5536 push(rax); // save rax
5537 push(reg); // pass register argument
5538
5539 // Pass register number to verify_oop_subroutine
5540 const char* b = nullptr;
5541 {
5542 ResourceMark rm;
5543 stringStream ss;
5544 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
5545 b = code_string(ss.as_string());
5546 }
5547 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
5548 pushptr(buffer.addr(), rscratch1);
5549
5550 // call indirectly to solve generation ordering problem
5571 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
5572 int stackElementSize = Interpreter::stackElementSize;
5573 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
5574 #ifdef ASSERT
5575 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
5576 assert(offset1 - offset == stackElementSize, "correct arithmetic");
5577 #endif
5578 Register scale_reg = noreg;
5579 Address::ScaleFactor scale_factor = Address::no_scale;
5580 if (arg_slot.is_constant()) {
5581 offset += arg_slot.as_constant() * stackElementSize;
5582 } else {
5583 scale_reg = arg_slot.as_register();
5584 scale_factor = Address::times(stackElementSize);
5585 }
5586 offset += wordSize; // return PC is on stack
5587 return Address(rsp, scale_reg, scale_factor, offset);
5588 }
5589
5590 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
5591 if (!VerifyOops) return;
5592
5593 #ifdef _LP64
5594 push(rscratch1);
5595 #endif
5596   push(rax); // save rax
5597   // addr may contain rsp so we will have to adjust it based on the push
5598   // we just did (and on 64 bit we do two pushes)
5599   // NOTE: the 64-bit code once had a bug here: it did movq(addr, rax), which
5600   // stored rax into addr, the reverse of what was intended.
5601 if (addr.uses(rsp)) {
5602 lea(rax, addr);
5603 pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
5604 } else {
5605 pushptr(addr);
5606 }
5607
5608 // Pass register number to verify_oop_subroutine
5609 const char* b = nullptr;
5610 {
5611 ResourceMark rm;
6058
6059 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
6060 // get mirror
6061 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
6062 load_method_holder(mirror, method);
6063 movptr(mirror, Address(mirror, mirror_offset));
6064 resolve_oop_handle(mirror, tmp);
6065 }
6066
6067 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
6068 load_method_holder(rresult, rmethod);
6069 movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
6070 }
6071
6072 void MacroAssembler::load_method_holder(Register holder, Register method) {
6073 movptr(holder, Address(method, Method::const_offset())); // ConstMethod*
6074 movptr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
6075 movptr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
6076 }
6077
6078 #ifdef _LP64
6079 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
6080 assert(UseCompactObjectHeaders, "expect compact object headers");
6081 movq(dst, Address(src, oopDesc::mark_offset_in_bytes()));
6082 shrq(dst, markWord::klass_shift);
6083 }
6084 #endif
6085
6086 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
6087 assert_different_registers(src, tmp);
6088 assert_different_registers(dst, tmp);
6089 #ifdef _LP64
6090 if (UseCompactObjectHeaders) {
6091 load_narrow_klass_compact(dst, src);
6092 decode_klass_not_null(dst, tmp);
6093 } else if (UseCompressedClassPointers) {
6094 movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
6095 decode_klass_not_null(dst, tmp);
6096 } else
6097 #endif
6098 {
6099 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
6100 }
6101 }
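// load_klass thus covers three header layouts: with compact object
// headers the narrow Klass* lives in the upper bits of the mark word
// (extracted by load_narrow_klass_compact above); with compressed class
// pointers a 32-bit narrow Klass* sits at klass_offset_in_bytes and is
// decoded; otherwise a full Klass* is loaded directly.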
6102
6103 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
6104 assert(!UseCompactObjectHeaders, "not with compact headers");
6105 assert_different_registers(src, tmp);
6106 assert_different_registers(dst, tmp);
6107 #ifdef _LP64
6108 if (UseCompressedClassPointers) {
6109 encode_klass_not_null(src, tmp);
6110 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
6111 } else
6112 #endif
6113 movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
6114 }
6115
6116 void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
6117 #ifdef _LP64
6118 if (UseCompactObjectHeaders) {
6119 assert(tmp != noreg, "need tmp");
6120 assert_different_registers(klass, obj, tmp);
6121 load_narrow_klass_compact(tmp, obj);
6122 cmpl(klass, tmp);
6155 bool as_raw = (decorators & AS_RAW) != 0;
6156 if (as_raw) {
6157 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
6158 } else {
6159 bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
6160 }
6161 }
6162
6163 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
6164 Register tmp1, Register tmp2, Register tmp3) {
6165 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
6166 decorators = AccessInternal::decorator_fixup(decorators, type);
6167 bool as_raw = (decorators & AS_RAW) != 0;
6168 if (as_raw) {
6169 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
6170 } else {
6171 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
6172 }
6173 }
6174
6175 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
6176 Register thread_tmp, DecoratorSet decorators) {
6177 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
6178 }
6179
6180 // Doesn't do verification, generates fixed size code
6181 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
6182 Register thread_tmp, DecoratorSet decorators) {
6183 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
6184 }
6185
6186 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
6187 Register tmp2, Register tmp3, DecoratorSet decorators) {
6188 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
6189 }
6190
6191 // Used for storing nulls.
6192 void MacroAssembler::store_heap_oop_null(Address dst) {
6193 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
6194 }
6503
6504 void MacroAssembler::reinit_heapbase() {
6505 if (UseCompressedOops) {
6506 if (Universe::heap() != nullptr) {
6507 if (CompressedOops::base() == nullptr) {
6508 MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
6509 } else {
6510 mov64(r12_heapbase, (int64_t)CompressedOops::base());
6511 }
6512 } else {
6513 movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr()));
6514 }
6515 }
6516 }
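// r12_heapbase is the dedicated base register for compressed-oop
// decoding. With zero-based compressed oops (base == nullptr) it is
// simply cleared so decoding reduces to a shift; before the heap
// exists the eventual base is loaded indirectly through
// CompressedOops::base_addr().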
6517
6518 #endif // _LP64
6519
6520 #if COMPILER2_OR_JVMCI
6521
6522 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers
6523 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
6524 // cnt - number of qwords (8-byte words).
6525 // base - start address, qword aligned.
6526 Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
6527 bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
6528 if (use64byteVector) {
6529 vpxor(xtmp, xtmp, xtmp, AVX_512bit);
6530 } else if (MaxVectorSize >= 32) {
6531 vpxor(xtmp, xtmp, xtmp, AVX_256bit);
6532 } else {
6533 pxor(xtmp, xtmp);
6534 }
6535 jmp(L_zero_64_bytes);
6536
6537 BIND(L_loop);
6538 if (MaxVectorSize >= 32) {
6539 fill64(base, 0, xtmp, use64byteVector);
6540 } else {
6541 movdqu(Address(base, 0), xtmp);
6542 movdqu(Address(base, 16), xtmp);
6543 movdqu(Address(base, 32), xtmp);
6544 movdqu(Address(base, 48), xtmp);
6545 }
6546 addptr(base, 64);
6547
6548 BIND(L_zero_64_bytes);
6549 subptr(cnt, 8);
6550 jccb(Assembler::greaterEqual, L_loop);
6551
6552 // Copy trailing 64 bytes
6553 if (use64byteVector) {
6554 addptr(cnt, 8);
6555 jccb(Assembler::equal, L_end);
6556 fill64_masked(3, base, 0, xtmp, mask, cnt, rtmp, true);
6557 jmp(L_end);
6558 } else {
6559 addptr(cnt, 4);
6560 jccb(Assembler::less, L_tail);
6561 if (MaxVectorSize >= 32) {
6562 vmovdqu(Address(base, 0), xtmp);
6563 } else {
6564 movdqu(Address(base, 0), xtmp);
6565 movdqu(Address(base, 16), xtmp);
6566 }
6567 }
6568 addptr(base, 32);
6569 subptr(cnt, 4);
6570
6571 BIND(L_tail);
6572 addptr(cnt, 4);
6573 jccb(Assembler::lessEqual, L_end);
6574 if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
6575 fill32_masked(3, base, 0, xtmp, mask, cnt, rtmp);
6576 } else {
6577 decrement(cnt);
6578
6579 BIND(L_sloop);
6580 movq(Address(base, 0), xtmp);
6581 addptr(base, 8);
6582 decrement(cnt);
6583 jccb(Assembler::greaterEqual, L_sloop);
6584 }
6585 BIND(L_end);
6586 }
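// Rough shape of the loop above, with cnt counting qwords: a 64-byte
// main loop runs while at least eight qwords remain; with 64-byte
// vectors the whole 0..7 qword tail is then written by one masked
// store. Otherwise a single 32-byte step handles four remaining
// qwords, and the last one to three qwords go through a masked store
// on AVX-512VL or the scalar movq loop.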
6587
6588 // Clearing constant sized memory using YMM/ZMM registers.
6589 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
6590 assert(UseAVX > 2 && VM_Version::supports_avx512vl(), "");
6591 bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
6592
6593 int vector64_count = (cnt & (~0x7)) >> 3;
6594 cnt = cnt & 0x7;
6595 const int fill64_per_loop = 4;
6596 const int max_unrolled_fill64 = 8;
6597
6598 // 64 byte initialization loop.
6599 vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit);
6600 int start64 = 0;
6601 if (vector64_count > max_unrolled_fill64) {
6602 Label LOOP;
6603 Register index = rtmp;
6604
6605 start64 = vector64_count - (vector64_count % fill64_per_loop);
6606
6607 movl(index, 0);
6657 break;
6658 case 7:
6659 if (use64byteVector) {
6660 movl(rtmp, 0x7F);
6661 kmovwl(mask, rtmp);
6662 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
6663 } else {
6664 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
6665 movl(rtmp, 0x7);
6666 kmovwl(mask, rtmp);
6667 evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
6668 }
6669 break;
6670 default:
6671       fatal("Unexpected length: %d\n", cnt);
6672 break;
6673 }
6674 }
6675 }
6676
6677 void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp, XMMRegister xtmp,
6678 bool is_large, KRegister mask) {
6679 // cnt - number of qwords (8-byte words).
6680 // base - start address, qword aligned.
6681   // is_large - if the optimizer knows cnt is larger than InitArrayShortSize
6682   assert(base == rdi, "base register must be rdi for rep stos");
6683   assert(tmp == rax, "tmp register must be rax for rep stos");
6684   assert(cnt == rcx, "cnt register must be rcx for rep stos");
6685   assert(InitArrayShortSize % BytesPerLong == 0,
6686     "InitArrayShortSize should be a multiple of BytesPerLong");
6687
6688 Label DONE;
6689 if (!is_large || !UseXMMForObjInit) {
6690 xorptr(tmp, tmp);
6691 }
6692
6693 if (!is_large) {
6694 Label LOOP, LONG;
6695 cmpptr(cnt, InitArrayShortSize/BytesPerLong);
6696 jccb(Assembler::greater, LONG);
6697
6698 NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
6699
6700 decrement(cnt);
6701 jccb(Assembler::negative, DONE); // Zero length
6702
6703 // Use individual pointer-sized stores for small counts:
6704 BIND(LOOP);
6705 movptr(Address(base, cnt, Address::times_ptr), tmp);
6706 decrement(cnt);
6707 jccb(Assembler::greaterEqual, LOOP);
6708 jmpb(DONE);
6709
6710 BIND(LONG);
6711 }
6712
6713 // Use longer rep-prefixed ops for non-small counts:
6714 if (UseFastStosb) {
6715 shlptr(cnt, 3); // convert to number of bytes
6716 rep_stosb();
6717 } else if (UseXMMForObjInit) {
6718 xmm_clear_mem(base, cnt, tmp, xtmp, mask);
6719 } else {
6720 NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
6721 rep_stos();
6722 }
6723
6724 BIND(DONE);
6725 }
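// Three strategies for the non-small path: rep stosb needs a byte
// count (hence shlptr(cnt, 3) to convert qwords to bytes), the XMM
// path reuses xmm_clear_mem above, and plain rep stos works on
// pointer-sized words. The explicit store loop is kept for short
// arrays because rep string instructions carry a fixed startup cost
// that dominates at small counts.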
6726
6727 #endif //COMPILER2_OR_JVMCI
6728
6729
6730 void MacroAssembler::generate_fill(BasicType t, bool aligned,
6731 Register to, Register value, Register count,
6732 Register rtmp, XMMRegister xtmp) {
6733 ShortBranchVerifier sbv(this);
6734 assert_different_registers(to, value, count, rtmp);
6735 Label L_exit;
6736 Label L_fill_2_bytes, L_fill_4_bytes;
6737
6738 #if defined(COMPILER2) && defined(_LP64)
10819
10820 // Load top.
10821 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10822
10823 // Check if the lock-stack is full.
10824 cmpl(top, LockStack::end_offset());
10825 jcc(Assembler::greaterEqual, slow);
10826
10827 // Check for recursion.
10828 cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
10829 jcc(Assembler::equal, push);
10830
10831 // Check header for monitor (0b10).
10832 testptr(reg_rax, markWord::monitor_value);
10833 jcc(Assembler::notZero, slow);
10834
10835 // Try to lock. Transition lock bits 0b01 => 0b00
10836 movptr(tmp, reg_rax);
10837 andptr(tmp, ~(int32_t)markWord::unlocked_value);
10838 orptr(reg_rax, markWord::unlocked_value);
10839 lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
10840 jcc(Assembler::notEqual, slow);
10841
10842   // Restore top; the CAS above clobbered the register.
10843 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10844
10845 bind(push);
10846 // After successful lock, push object on lock-stack.
10847 movptr(Address(thread, top), obj);
10848 incrementl(top, oopSize);
10849 movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
10850 }
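// The fast path above encodes the lightweight-locking protocol: the low
// two mark-word bits are 0b01 when unlocked, 0b00 when fast-locked and
// 0b10 when a monitor is inflated. The CAS attempts the 0b01 -> 0b00
// transition; a recursive acquire is recognized by finding obj already
// on top of the per-thread lock-stack, in which case the mark word is
// left untouched and obj is simply pushed again.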
10851
10852 // Implements lightweight-unlocking.
10853 //
10854 // obj: the object to be unlocked
10855 // reg_rax: rax
10856 // thread: the thread
10857 // tmp: a temporary register
10858 void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/assembler.hpp"
26 #include "asm/assembler.inline.hpp"
27 #include "code/compiledIC.hpp"
28 #include "compiler/compiler_globals.hpp"
29 #include "compiler/disassembler.hpp"
30 #include "ci/ciInlineKlass.hpp"
31 #include "crc32c.h"
32 #include "gc/shared/barrierSet.hpp"
33 #include "gc/shared/barrierSetAssembler.hpp"
34 #include "gc/shared/collectedHeap.inline.hpp"
35 #include "gc/shared/tlab_globals.hpp"
36 #include "interpreter/bytecodeHistogram.hpp"
37 #include "interpreter/interpreter.hpp"
38 #include "interpreter/interpreterRuntime.hpp"
39 #include "jvm.h"
40 #include "memory/resourceArea.hpp"
41 #include "memory/universe.hpp"
42 #include "oops/accessDecorators.hpp"
43 #include "oops/compressedKlass.inline.hpp"
44 #include "oops/compressedOops.inline.hpp"
45 #include "oops/klass.inline.hpp"
46 #include "oops/resolvedFieldEntry.hpp"
47 #include "prims/methodHandles.hpp"
48 #include "runtime/continuation.hpp"
49 #include "runtime/interfaceSupport.inline.hpp"
50 #include "runtime/javaThread.hpp"
51 #include "runtime/jniHandles.hpp"
52 #include "runtime/objectMonitor.hpp"
53 #include "runtime/os.hpp"
54 #include "runtime/safepoint.hpp"
55 #include "runtime/safepointMechanism.hpp"
56 #include "runtime/sharedRuntime.hpp"
57 #include "runtime/signature_cc.hpp"
58 #include "runtime/stubRoutines.hpp"
59 #include "utilities/checkedCast.hpp"
60 #include "utilities/macros.hpp"
61 #include "vmreg_x86.inline.hpp"
62 #ifdef COMPILER2
63 #include "opto/output.hpp"
64 #endif
65
66 #ifdef PRODUCT
67 #define BLOCK_COMMENT(str) /* nothing */
68 #define STOP(error) stop(error)
69 #else
70 #define BLOCK_COMMENT(str) block_comment(str)
71 #define STOP(error) block_comment(error); stop(error)
72 #endif
73
74 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
75
76 #ifdef ASSERT
77 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
78 #endif
79
80 static const Assembler::Condition reverse[] = {
81 Assembler::noOverflow /* overflow = 0x0 */ ,
82 Assembler::overflow /* noOverflow = 0x1 */ ,
83 Assembler::aboveEqual /* carrySet = 0x2, below = 0x2 */ ,
84 Assembler::below /* aboveEqual = 0x3, carryClear = 0x3 */ ,
1726 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1727 LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
1728 LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
1729 pass_arg2(this, arg_2);
1730 pass_arg1(this, arg_1);
1731 pass_arg0(this, arg_0);
1732 call_VM_leaf(entry_point, 3);
1733 }
1734
1735 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
1736 LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3));
1737 LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
1738 LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
1739 pass_arg3(this, arg_3);
1740 pass_arg2(this, arg_2);
1741 pass_arg1(this, arg_1);
1742 pass_arg0(this, arg_0);
1743   call_VM_leaf(entry_point, 4);
1744 }
1745
1746 void MacroAssembler::super_call_VM_leaf(address entry_point) {
1747   MacroAssembler::call_VM_leaf_base(entry_point, 0);
1748 }
1749
1750 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
1751 pass_arg0(this, arg_0);
1752 MacroAssembler::call_VM_leaf_base(entry_point, 1);
1753 }
1754
1755 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1756 LP64_ONLY(assert_different_registers(arg_0, c_rarg1));
1757 pass_arg1(this, arg_1);
1758 pass_arg0(this, arg_0);
1759 MacroAssembler::call_VM_leaf_base(entry_point, 2);
1760 }
1761
1762 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1763 LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
1764 LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
1765 pass_arg2(this, arg_2);
1766 pass_arg1(this, arg_1);
1767 pass_arg0(this, arg_0);
1768 MacroAssembler::call_VM_leaf_base(entry_point, 3);
1769 }
3007 lea(rscratch, src);
3008 Assembler::mulss(dst, Address(rscratch, 0));
3009 }
3010 }
3011
3012 void MacroAssembler::null_check(Register reg, int offset) {
3013 if (needs_explicit_null_check(offset)) {
3014 // provoke OS null exception if reg is null by
3015 // accessing M[reg] w/o changing any (non-CC) registers
3016 // NOTE: cmpl is plenty here to provoke a segv
3017 cmpptr(rax, Address(reg, 0));
3018 // Note: should probably use testl(rax, Address(reg, 0));
3019 // may be shorter code (however, this version of
3020 // testl needs to be implemented first)
3021 } else {
3022 // nothing to do, (later) access of M[reg + offset]
3023 // will provoke OS null exception if reg is null
3024 }
3025 }
3026
3027 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
3028 andptr(markword, markWord::inline_type_mask_in_place);
3029 cmpptr(markword, markWord::inline_type_pattern);
3030 jcc(Assembler::equal, is_inline_type);
3031 }
3032
3033 void MacroAssembler::test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type) {
3034 load_unsigned_short(temp_reg, Address(klass, Klass::access_flags_offset()));
3035 testl(temp_reg, JVM_ACC_IDENTITY);
3036 jcc(Assembler::zero, is_inline_type);
3037 }
3038
3039 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type) {
3040 testptr(object, object);
3041 jcc(Assembler::zero, not_inline_type);
3042 const int is_inline_type_mask = markWord::inline_type_pattern;
3043 movptr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
3044 andptr(tmp, is_inline_type_mask);
3045 cmpptr(tmp, is_inline_type_mask);
3046 jcc(Assembler::notEqual, not_inline_type);
3047 }
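// The markword- and oop-based tests above key off
// markWord::inline_type_pattern: the mark word of every inline-type
// instance carries that fixed bit pattern, so masking with
// inline_type_mask_in_place and comparing identifies an inline type
// without loading the klass (the oop variant filters null first, since
// null has no mark word). test_klass_is_inline_type instead checks the
// JVM_ACC_IDENTITY access flag, which is set for identity classes and
// clear for inline types.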
3048
3049 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) {
3050 movl(temp_reg, flags);
3051 testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
3052 jcc(Assembler::notEqual, is_null_free_inline_type);
3053 }
3054
3055 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) {
3056 movl(temp_reg, flags);
3057 testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
3058 jcc(Assembler::equal, not_null_free_inline_type);
3059 }
3060
3061 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) {
3062 movl(temp_reg, flags);
3063 testl(temp_reg, 1 << ResolvedFieldEntry::is_flat_shift);
3064 jcc(Assembler::notEqual, is_flat);
3065 }
3066
3067 void MacroAssembler::test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker) {
3068 movl(temp_reg, flags);
3069 testl(temp_reg, 1 << ResolvedFieldEntry::has_null_marker_shift);
3070 jcc(Assembler::notEqual, has_null_marker);
3071 }
3072
3073 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
3074 Label test_mark_word;
3075 // load mark word
3076 movptr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
3077   // if the mark word is unlocked it carries the prototype bits directly
3078   testl(temp_reg, markWord::unlocked_value);
3079   jccb(Assembler::notZero, test_mark_word);
3080   // slow path: the mark may be displaced, use the klass prototype header instead
3081 push(rscratch1);
3082 load_prototype_header(temp_reg, oop, rscratch1);
3083 pop(rscratch1);
3084
3085 bind(test_mark_word);
3086 testl(temp_reg, test_bit);
3087 jcc((jmp_set) ? Assembler::notZero : Assembler::zero, jmp_label);
3088 }
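// test_oop_prototype_bit reads the prototype bits straight from the
// object's mark word when it is in the unlocked state; when the mark is
// not unlocked (displaced by stack-locking, or an inflated monitor)
// those bits are not reliable, so the klass's prototype header is
// loaded instead. Both flat_array_bit and null_free_array_bit below are
// queried through this helper.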
3089
3090 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg,
3091 Label& is_flat_array) {
3092 #ifdef _LP64
3093 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array);
3094 #else
3095 load_klass(temp_reg, oop, noreg);
3096 movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
3097 test_flat_array_layout(temp_reg, is_flat_array);
3098 #endif
3099 }
3100
3101 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg,
3102 Label& is_non_flat_array) {
3103 #ifdef _LP64
3104 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array);
3105 #else
3106 load_klass(temp_reg, oop, noreg);
3107 movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
3108 test_non_flat_array_layout(temp_reg, is_non_flat_array);
3109 #endif
3110 }
3111
3112 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array) {
3113 #ifdef _LP64
3114 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
3115 #else
3116 Unimplemented();
3117 #endif
3118 }
3119
3120 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array) {
3121 #ifdef _LP64
3122 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
3123 #else
3124 Unimplemented();
3125 #endif
3126 }
3127
3128 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) {
3129 testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
3130 jcc(Assembler::notZero, is_flat_array);
3131 }
3132
3133 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) {
3134 testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
3135 jcc(Assembler::zero, is_non_flat_array);
3136 }
3137
3138 void MacroAssembler::os_breakpoint() {
3139   // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
3140 // (e.g., MSVC can't call ps() otherwise)
3141 call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
3142 }
3143
3144 void MacroAssembler::unimplemented(const char* what) {
3145 const char* buf = nullptr;
3146 {
3147 ResourceMark rm;
3148 stringStream ss;
3149 ss.print("unimplemented: %s", what);
3150 buf = code_string(ss.as_string());
3151 }
3152 stop(buf);
3153 }
3154
3155 #ifdef _LP64
3156 #define XSTATE_BV 0x200
3157 #endif
4305 }
4306
4307 // C++ bool manipulation
4308 void MacroAssembler::testbool(Register dst) {
4309   if (sizeof(bool) == 1)
4310     testb(dst, 0xff);
4311   else if (sizeof(bool) == 2) {
4312     // testw implementation needed for two byte bools
4313     ShouldNotReachHere();
4314   } else if (sizeof(bool) == 4)
4315     testl(dst, dst);
4316   else
4317     // unsupported
4318     ShouldNotReachHere();
4319 }
4320
4321 void MacroAssembler::testptr(Register dst, Register src) {
4322 LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
4323 }
4324
4325 // Object / value buffer allocation...
4326 //
4327 // Kills klass and rsi on LP64
4328 void MacroAssembler::allocate_instance(Register klass, Register new_obj,
4329 Register t1, Register t2,
4330 bool clear_fields, Label& alloc_failed)
4331 {
4332 Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop;
4333 Register layout_size = t1;
4334 assert(new_obj == rax, "needs to be rax");
4335 assert_different_registers(klass, new_obj, t1, t2);
4336
4337 // get instance_size in InstanceKlass (scaled to a count of bytes)
4338 movl(layout_size, Address(klass, Klass::layout_helper_offset()));
4339 // test to see if it is malformed in some way
4340 testl(layout_size, Klass::_lh_instance_slow_path_bit);
4341 jcc(Assembler::notZero, slow_case_no_pop);
4342
4343 // Allocate the instance:
4344 // If TLAB is enabled:
4345 // Try to allocate in the TLAB.
4346 // If fails, go to the slow path.
4347 // Else If inline contiguous allocations are enabled:
4348 // Try to allocate in eden.
4349 // If fails due to heap end, go to slow path.
4350 //
4351 // If TLAB is enabled OR inline contiguous is enabled:
4352 // Initialize the allocation.
4353 // Exit.
4354 //
4355 // Go to slow path.
4356
4357 push(klass);
4358 const Register thread = r15_thread;
4359
4360 if (UseTLAB) {
4361 tlab_allocate(thread, new_obj, layout_size, 0, klass, t2, slow_case);
4362 if (ZeroTLAB || (!clear_fields)) {
4363       // the fields have already been cleared
4364 jmp(initialize_header);
4365 } else {
4366 // initialize both the header and fields
4367 jmp(initialize_object);
4368 }
4369 } else {
4370 jmp(slow_case);
4371 }
4372
4373   // If UseTLAB is true, the object was allocated above and still needs to be initialized.
4374   // Otherwise, control has already jumped to the slow path.
4375 if (UseTLAB) {
4376 if (clear_fields) {
4377 // The object is initialized before the header. If the object size is
4378 // zero, go directly to the header initialization.
4379 bind(initialize_object);
4380 if (UseCompactObjectHeaders) {
4381 assert(is_aligned(oopDesc::base_offset_in_bytes(), BytesPerLong), "oop base offset must be 8-byte-aligned");
4382 decrement(layout_size, oopDesc::base_offset_in_bytes());
4383 } else {
4384 decrement(layout_size, sizeof(oopDesc));
4385 }
4386 jcc(Assembler::zero, initialize_header);
4387
4388 // Initialize topmost object field, divide size by 8, check if odd and
4389 // test if zero.
4390 Register zero = klass;
4391 xorl(zero, zero); // use zero reg to clear memory (shorter code)
4392 shrl(layout_size, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
4393
4394 #ifdef ASSERT
4395 // make sure instance_size was multiple of 8
4396 Label L;
4397     // Ignore the partial flag stall after shrl() since this is debug-VM-only code
4398 jcc(Assembler::carryClear, L);
4399 stop("object size is not multiple of 2 - adjust this code");
4400 bind(L);
4401 // must be > 0, no extra check needed here
4402 #endif
4403
4404 // initialize remaining object fields: instance_size was a multiple of 8
4405 {
4406 Label loop;
4407 bind(loop);
4408 int header_size_bytes = oopDesc::header_size() * HeapWordSize;
4409 assert(is_aligned(header_size_bytes, BytesPerLong), "oop header size must be 8-byte-aligned");
4410 movptr(Address(new_obj, layout_size, Address::times_8, header_size_bytes - 1*oopSize), zero);
4411 decrement(layout_size);
4412 jcc(Assembler::notZero, loop);
4413 }
4414 } // clear_fields
4415
4416 // initialize object header only.
4417 bind(initialize_header);
4418 if (UseCompactObjectHeaders || EnableValhalla) {
4419 pop(klass);
4420 Register mark_word = t2;
4421 movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
4422       movptr(Address(new_obj, oopDesc::mark_offset_in_bytes()), mark_word);
4423 } else {
4424 movptr(Address(new_obj, oopDesc::mark_offset_in_bytes()),
4425 (intptr_t)markWord::prototype().value()); // header
4426 pop(klass); // get saved klass back in the register.
4427 }
4428 if (!UseCompactObjectHeaders) {
4429 xorl(rsi, rsi); // use zero reg to clear memory (shorter code)
4430 store_klass_gap(new_obj, rsi); // zero klass gap for compressed oops
4431 movptr(t2, klass); // preserve klass
4432 store_klass(new_obj, t2, rscratch1); // src klass reg is potentially compressed
4433 }
4434 jmp(done);
4435 }
4436
4437 bind(slow_case);
4438 pop(klass);
4439 bind(slow_case_no_pop);
4440 jmp(alloc_failed);
4441
4442 bind(done);
4443 }
4444
4445 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
4446 void MacroAssembler::tlab_allocate(Register thread, Register obj,
4447 Register var_size_in_bytes,
4448 int con_size_in_bytes,
4449 Register t1,
4450 Register t2,
4451 Label& slow_case) {
4452 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
4453 bs->tlab_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
4454 }
4455
4456 RegSet MacroAssembler::call_clobbered_gp_registers() {
4457 RegSet regs;
4458 #ifdef _LP64
4459 regs += RegSet::of(rax, rcx, rdx);
4460 #ifndef _WINDOWS
4461 regs += RegSet::of(rsi, rdi);
4462 #endif
4463 regs += RegSet::range(r8, r11);
4464 #else
4683 // clear topmost word (no jump would be needed if conditional assignment worked here)
4684 movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp);
4685 // index could be 0 now, must check again
4686 jcc(Assembler::zero, done);
4687 bind(even);
4688 }
4689 #endif // !_LP64
4690 // initialize remaining object fields: index is a multiple of 2 now
4691 {
4692 Label loop;
4693 bind(loop);
4694 movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
4695 NOT_LP64(movptr(Address(address, index, Address::times_8, offset_in_bytes - 2*BytesPerWord), temp);)
4696 decrement(index);
4697 jcc(Assembler::notZero, loop);
4698 }
4699
4700 bind(done);
4701 }
4702
4703 void MacroAssembler::get_inline_type_field_klass(Register holder_klass, Register index, Register inline_klass) {
4704 inline_layout_info(holder_klass, index, inline_klass);
4705 movptr(inline_klass, Address(inline_klass, InlineLayoutInfo::klass_offset()));
4706 }
4707
4708 void MacroAssembler::inline_layout_info(Register holder_klass, Register index, Register layout_info) {
4709 movptr(layout_info, Address(holder_klass, InstanceKlass::inline_layout_info_array_offset()));
4710 #ifdef ASSERT
4711 {
4712 Label done;
4713 cmpptr(layout_info, 0);
4714 jcc(Assembler::notEqual, done);
4715 stop("inline_layout_info_array is null");
4716 bind(done);
4717 }
4718 #endif
4719
4720 InlineLayoutInfo array[2];
4721 int size = (char*)&array[1] - (char*)&array[0]; // computing size of array elements
4722 if (is_power_of_2(size)) {
4723 shll(index, log2i_exact(size)); // Scale index by power of 2
4724 } else {
4725 imull(index, index, size); // Scale the index to be the entry index * array_element_size
4726 }
4727 lea(layout_info, Address(layout_info, index, Address::times_1, Array<InlineLayoutInfo>::base_offset_in_bytes()));
4728 }
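// The element size is derived portably from the address difference of
// two adjacent InlineLayoutInfo entries (equivalent to
// sizeof(InlineLayoutInfo)). For instance, if that size came out as 16
// the scaling above would emit shll(index, 4); for a non-power-of-two
// size such as 24 it would emit imull(index, index, 24) instead.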
4729
4730 // Look up the method for a megamorphic invokeinterface call.
4731 // The target method is determined by <intf_klass, itable_index>.
4732 // The receiver klass is in recv_klass.
4733 // On success, the result will be in method_result, and execution falls through.
4734 // On failure, execution transfers to the given label.
4735 void MacroAssembler::lookup_interface_method(Register recv_klass,
4736 Register intf_klass,
4737 RegisterOrConstant itable_index,
4738 Register method_result,
4739 Register scan_temp,
4740 Label& L_no_such_interface,
4741 bool return_method) {
4742 assert_different_registers(recv_klass, intf_klass, scan_temp);
4743 assert_different_registers(method_result, intf_klass, scan_temp);
4744 assert(recv_klass != method_result || !return_method,
4745 "recv_klass can be destroyed when method isn't needed");
4746
4747 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
4748 "caller must use same register for non-constant itable index as for method");
4749
5779 } else {
5780 Label L;
5781 jccb(negate_condition(cc), L);
5782 movl(dst, src);
5783 bind(L);
5784 }
5785 }
5786
5787 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
5788 if (VM_Version::supports_cmov()) {
5789 cmovl(cc, dst, src);
5790 } else {
5791 Label L;
5792 jccb(negate_condition(cc), L);
5793 movl(dst, src);
5794 bind(L);
5795 }
5796 }
5797
5798 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
5799 if (!VerifyOops || VerifyAdapterSharing) {
5800     // The address of the code string below confuses VerifyAdapterSharing
5801     // because it may differ between otherwise equivalent adapters.
5802 return;
5803 }
5804
5805 BLOCK_COMMENT("verify_oop {");
5806 #ifdef _LP64
5807 push(rscratch1);
5808 #endif
5809 push(rax); // save rax
5810 push(reg); // pass register argument
5811
5812 // Pass register number to verify_oop_subroutine
5813 const char* b = nullptr;
5814 {
5815 ResourceMark rm;
5816 stringStream ss;
5817 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
5818 b = code_string(ss.as_string());
5819 }
5820 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
5821 pushptr(buffer.addr(), rscratch1);
5822
5823 // call indirectly to solve generation ordering problem
5844 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
5845 int stackElementSize = Interpreter::stackElementSize;
5846 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
5847 #ifdef ASSERT
5848 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
5849 assert(offset1 - offset == stackElementSize, "correct arithmetic");
5850 #endif
5851 Register scale_reg = noreg;
5852 Address::ScaleFactor scale_factor = Address::no_scale;
5853 if (arg_slot.is_constant()) {
5854 offset += arg_slot.as_constant() * stackElementSize;
5855 } else {
5856 scale_reg = arg_slot.as_register();
5857 scale_factor = Address::times(stackElementSize);
5858 }
5859 offset += wordSize; // return PC is on stack
5860 return Address(rsp, scale_reg, scale_factor, offset);
5861 }
5862
5863 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
5864 if (!VerifyOops || VerifyAdapterSharing) {
5865     // The address of the code string below confuses VerifyAdapterSharing
5866     // because it may differ between otherwise equivalent adapters.
5867 return;
5868 }
5869
5870 #ifdef _LP64
5871 push(rscratch1);
5872 #endif
5873   push(rax); // save rax
5874   // addr may contain rsp so we will have to adjust it based on the push
5875   // we just did (and on 64 bit we do two pushes)
5876   // NOTE: the 64-bit code once had a bug here: it did movq(addr, rax), which
5877   // stored rax into addr, the reverse of what was intended.
5878 if (addr.uses(rsp)) {
5879 lea(rax, addr);
5880 pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
5881 } else {
5882 pushptr(addr);
5883 }
5884
5885 // Pass register number to verify_oop_subroutine
5886 const char* b = nullptr;
5887 {
5888 ResourceMark rm;
6335
6336 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
6337 // get mirror
6338 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
6339 load_method_holder(mirror, method);
6340 movptr(mirror, Address(mirror, mirror_offset));
6341 resolve_oop_handle(mirror, tmp);
6342 }
6343
6344 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
6345 load_method_holder(rresult, rmethod);
6346 movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
6347 }
6348
6349 void MacroAssembler::load_method_holder(Register holder, Register method) {
6350 movptr(holder, Address(method, Method::const_offset())); // ConstMethod*
6351 movptr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
6352 movptr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
6353 }
6354
6355 void MacroAssembler::load_metadata(Register dst, Register src) {
6356 #ifdef _LP64
6357 if (UseCompactObjectHeaders) {
6358 load_narrow_klass_compact(dst, src);
6359 } else if (UseCompressedClassPointers) {
6360 movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
6361 } else
6362 #endif
6363 {
6364 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
6365 }
6366 }
6367
6368 #ifdef _LP64
6369 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
6370 assert(UseCompactObjectHeaders, "expect compact object headers");
6371 movq(dst, Address(src, oopDesc::mark_offset_in_bytes()));
6372 shrq(dst, markWord::klass_shift);
6373 }
6374 #endif
6375
6376 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
6377 assert_different_registers(src, tmp);
6378 assert_different_registers(dst, tmp);
6379 #ifdef _LP64
6380 if (UseCompactObjectHeaders) {
6381 load_narrow_klass_compact(dst, src);
6382 decode_klass_not_null(dst, tmp);
6383 } else if (UseCompressedClassPointers) {
6384 movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
6385 decode_klass_not_null(dst, tmp);
6386 } else
6387 #endif
6388 {
6389 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
6390 }
6391 }
6392
6393 void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) {
6394 load_klass(dst, src, tmp);
6395 movptr(dst, Address(dst, Klass::prototype_header_offset()));
6396 }
6397
6398 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
6399 assert(!UseCompactObjectHeaders, "not with compact headers");
6400 assert_different_registers(src, tmp);
6401 assert_different_registers(dst, tmp);
6402 #ifdef _LP64
6403 if (UseCompressedClassPointers) {
6404 encode_klass_not_null(src, tmp);
6405 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
6406 } else
6407 #endif
6408 movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
6409 }
6410
6411 void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
6412 #ifdef _LP64
6413 if (UseCompactObjectHeaders) {
6414 assert(tmp != noreg, "need tmp");
6415 assert_different_registers(klass, obj, tmp);
6416 load_narrow_klass_compact(tmp, obj);
6417 cmpl(klass, tmp);
6450 bool as_raw = (decorators & AS_RAW) != 0;
6451 if (as_raw) {
6452 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
6453 } else {
6454 bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
6455 }
6456 }
6457
6458 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
6459 Register tmp1, Register tmp2, Register tmp3) {
6460 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
6461 decorators = AccessInternal::decorator_fixup(decorators, type);
6462 bool as_raw = (decorators & AS_RAW) != 0;
6463 if (as_raw) {
6464 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
6465 } else {
6466 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
6467 }
6468 }
6469
6470 void MacroAssembler::flat_field_copy(DecoratorSet decorators, Register src, Register dst,
6471 Register inline_layout_info) {
6472 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
6473 bs->flat_field_copy(this, decorators, src, dst, inline_layout_info);
6474 }
6475
6476 void MacroAssembler::payload_offset(Register inline_klass, Register offset) {
6477 movptr(offset, Address(inline_klass, InstanceKlass::adr_inlineklass_fixed_block_offset()));
6478 movl(offset, Address(offset, InlineKlass::payload_offset_offset()));
6479 }
6480
6481 void MacroAssembler::payload_addr(Register oop, Register data, Register inline_klass) {
6482 // ((address) (void*) o) + vk->payload_offset();
6483 Register offset = (data == oop) ? rscratch1 : data;
6484 payload_offset(inline_klass, offset);
6485 if (data == oop) {
6486 addptr(data, offset);
6487 } else {
6488 lea(data, Address(oop, offset));
6489 }
6490 }
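// When data aliases oop, the payload offset has to be materialized in
// rscratch1 first (loading it into data would clobber the oop before
// it is used) and the address is formed with addptr; otherwise data can
// hold the offset itself and a single lea computes oop + offset.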
6491
6492 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass,
6493 Register index, Register data) {
6494   assert(index != rcx, "index must not be rcx: the shift count lives in rcx");
6495 assert_different_registers(array, array_klass, index);
6496 assert_different_registers(rcx, array, index);
6497
6498 // array->base() + (index << Klass::layout_helper_log2_element_size(lh));
6499 movl(rcx, Address(array_klass, Klass::layout_helper_offset()));
6500
6501 // Klass::layout_helper_log2_element_size(lh)
6502 // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
6503 shrl(rcx, Klass::_lh_log2_element_size_shift);
6504 andl(rcx, Klass::_lh_log2_element_size_mask);
6505 shlptr(index); // index << rcx
6506
6507 lea(data, Address(array, index, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT)));
6508 }
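// index is excluded from being rcx because x86 variable shifts
// (shl r/m, cl) take their count in cl: rcx must stay free to hold the
// log2 element size extracted from the layout helper, and the
// single-operand shlptr(index) shifts by that count.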
6509
6510 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
6511 Register thread_tmp, DecoratorSet decorators) {
6512 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
6513 }
6514
6515 // Doesn't do verification, generates fixed size code
6516 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
6517 Register thread_tmp, DecoratorSet decorators) {
6518 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
6519 }
6520
6521 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
6522 Register tmp2, Register tmp3, DecoratorSet decorators) {
6523 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
6524 }
6525
6526 // Used for storing nulls.
6527 void MacroAssembler::store_heap_oop_null(Address dst) {
6528 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
6529 }
6838
6839 void MacroAssembler::reinit_heapbase() {
6840 if (UseCompressedOops) {
6841 if (Universe::heap() != nullptr) {
6842 if (CompressedOops::base() == nullptr) {
6843 MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
6844 } else {
6845 mov64(r12_heapbase, (int64_t)CompressedOops::base());
6846 }
6847 } else {
6848 movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr()));
6849 }
6850 }
6851 }
6852
6853 #endif // _LP64
6854
6855 #if COMPILER2_OR_JVMCI
6856
6857 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers
6858 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, KRegister mask) {
6859 // cnt - number of qwords (8-byte words).
6860 // base - start address, qword aligned.
6861 Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
6862 bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
6863 if (use64byteVector) {
6864 evpbroadcastq(xtmp, val, AVX_512bit);
6865 } else if (MaxVectorSize >= 32) {
6866 movdq(xtmp, val);
6867 punpcklqdq(xtmp, xtmp);
6868 vinserti128_high(xtmp, xtmp);
6869 } else {
6870 movdq(xtmp, val);
6871 punpcklqdq(xtmp, xtmp);
6872 }
6873 jmp(L_zero_64_bytes);
6874
6875 BIND(L_loop);
6876 if (MaxVectorSize >= 32) {
6877 fill64(base, 0, xtmp, use64byteVector);
6878 } else {
6879 movdqu(Address(base, 0), xtmp);
6880 movdqu(Address(base, 16), xtmp);
6881 movdqu(Address(base, 32), xtmp);
6882 movdqu(Address(base, 48), xtmp);
6883 }
6884 addptr(base, 64);
6885
6886 BIND(L_zero_64_bytes);
6887 subptr(cnt, 8);
6888 jccb(Assembler::greaterEqual, L_loop);
6889
6890 // Copy trailing 64 bytes
6891 if (use64byteVector) {
6892 addptr(cnt, 8);
6893 jccb(Assembler::equal, L_end);
6894 fill64_masked(3, base, 0, xtmp, mask, cnt, val, true);
6895 jmp(L_end);
6896 } else {
6897 addptr(cnt, 4);
6898 jccb(Assembler::less, L_tail);
6899 if (MaxVectorSize >= 32) {
6900 vmovdqu(Address(base, 0), xtmp);
6901 } else {
6902 movdqu(Address(base, 0), xtmp);
6903 movdqu(Address(base, 16), xtmp);
6904 }
6905 }
6906 addptr(base, 32);
6907 subptr(cnt, 4);
6908
6909 BIND(L_tail);
6910 addptr(cnt, 4);
6911 jccb(Assembler::lessEqual, L_end);
6912 if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
6913 fill32_masked(3, base, 0, xtmp, mask, cnt, val);
6914 } else {
6915 decrement(cnt);
6916
6917 BIND(L_sloop);
6918 movq(Address(base, 0), xtmp);
6919 addptr(base, 8);
6920 decrement(cnt);
6921 jccb(Assembler::greaterEqual, L_sloop);
6922 }
6923 BIND(L_end);
6924 }
6925
6926 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
6927   assert(InlineTypeReturnedAsFields, "should only be called when inline types are returned as fields");
6928 // An inline type might be returned. If fields are in registers we
6929 // need to allocate an inline type instance and initialize it with
6930 // the value of the fields.
6931 Label skip;
6932   // A new buffered inline type is needed only when the fields are returned in registers, i.e. when rax is tagged
6933 testptr(rax, 1);
6934 jcc(Assembler::zero, skip);
6935 int call_offset = -1;
6936
6937 #ifdef _LP64
6938   // The following code is similar to allocate_instance but has some slight differences,
6939   // e.g. the object size is never zero and is sometimes a compile-time constant, and
6940   // storing the klass pointer after allocation is unnecessary when vk != nullptr. allocate_instance is not aware of these.
6941 Label slow_case;
6942 // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
6943   mov(rscratch1, rax); // save rax for slow_case since *_allocate may corrupt it if allocation fails
6944 if (vk != nullptr) {
6945 // Called from C1, where the return type is statically known.
6946 movptr(rbx, (intptr_t)vk->get_InlineKlass());
6947 jint lh = vk->layout_helper();
6948 assert(lh != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
6949 if (UseTLAB && !Klass::layout_helper_needs_slow_path(lh)) {
6950 tlab_allocate(r15_thread, rax, noreg, lh, r13, r14, slow_case);
6951 } else {
6952 jmp(slow_case);
6953 }
6954 } else {
6955     // Called from the interpreter. RAX contains ((the InlineKlass* of the return type) | 0x01)
6956 mov(rbx, rax);
6957 andptr(rbx, -2);
6958 if (UseTLAB) {
6959 movl(r14, Address(rbx, Klass::layout_helper_offset()));
6960 testl(r14, Klass::_lh_instance_slow_path_bit);
6961 jcc(Assembler::notZero, slow_case);
6962 tlab_allocate(r15_thread, rax, r14, 0, r13, r14, slow_case);
6963 } else {
6964 jmp(slow_case);
6965 }
6966 }
6967 if (UseTLAB) {
6968 // 2. Initialize buffered inline instance header
6969 Register buffer_obj = rax;
6970 if (UseCompactObjectHeaders) {
6971 Register mark_word = r13;
6972 movptr(mark_word, Address(rbx, Klass::prototype_header_offset()));
6973       movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), mark_word);
6974 } else {
6975 movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), (intptr_t)markWord::inline_type_prototype().value());
6976 xorl(r13, r13);
6977 store_klass_gap(buffer_obj, r13);
6978 if (vk == nullptr) {
6979 // store_klass corrupts rbx (the klass), so save it in r13 for later use (interpreter case only).
6980 mov(r13, rbx);
6981 }
6982 store_klass(buffer_obj, rbx, rscratch1);
6983 }
6984 // 3. Initialize its fields with an inline class specific handler
6985 if (vk != nullptr) {
6986 call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
6987 } else {
6988 movptr(rbx, Address(r13, InstanceKlass::adr_inlineklass_fixed_block_offset()));
6989 movptr(rbx, Address(rbx, InlineKlass::pack_handler_offset()));
6990 call(rbx);
6991 }
6992 jmp(skip);
6993 }
6994 bind(slow_case);
6995 // We failed to allocate a new buffered inline type, so fall back to a
6996 // runtime call. Some oop fields may still be live in registers but we
6997 // cannot tell which. The runtime call will take care of preserving them
6998 // across a GC if one occurs.
6999 mov(rax, rscratch1);
7000 #endif
7001
7002 if (from_interpreter) {
7003 super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
7004 } else {
7005 call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
7006 call_offset = offset();
7007 }
7008
7009 bind(skip);
7010 return call_offset;
7011 }
7012
7013 // Move a value between registers/stack slots and update the reg_state
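// reg_state tracks one state per VMReg, as used by the pack/unpack helpers in
// this file: reg_readonly (still holds an unconsumed source value and must not
// be overwritten yet), reg_writable (free to use), and reg_written
// (destination already holds its final value). A move only succeeds if the
// destination is not reg_readonly; on success the source becomes reg_writable
// and the destination reg_written.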
7014 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
7015 assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
7016 if (reg_state[to->value()] == reg_written) {
7017 return true; // Already written
7018 }
7019 if (from != to && bt != T_VOID) {
7020 if (reg_state[to->value()] == reg_readonly) {
7021 return false; // Not yet writable
7022 }
7023 if (from->is_reg()) {
7024 if (to->is_reg()) {
7025 if (from->is_XMMRegister()) {
7026 if (bt == T_DOUBLE) {
7027 movdbl(to->as_XMMRegister(), from->as_XMMRegister());
7028 } else {
7029 assert(bt == T_FLOAT, "must be float");
7030 movflt(to->as_XMMRegister(), from->as_XMMRegister());
7031 }
7032 } else {
7033 movq(to->as_Register(), from->as_Register());
7034 }
7035 } else {
7036 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
7037 Address to_addr = Address(rsp, st_off);
7038 if (from->is_XMMRegister()) {
7039 if (bt == T_DOUBLE) {
7040 movdbl(to_addr, from->as_XMMRegister());
7041 } else {
7042 assert(bt == T_FLOAT, "must be float");
7043 movflt(to_addr, from->as_XMMRegister());
7044 }
7045 } else {
7046 movq(to_addr, from->as_Register());
7047 }
7048 }
7049 } else {
7050 Address from_addr = Address(rsp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize);
7051 if (to->is_reg()) {
7052 if (to->is_XMMRegister()) {
7053 if (bt == T_DOUBLE) {
7054 movdbl(to->as_XMMRegister(), from_addr);
7055 } else {
7056 assert(bt == T_FLOAT, "must be float");
7057 movflt(to->as_XMMRegister(), from_addr);
7058 }
7059 } else {
7060 movq(to->as_Register(), from_addr);
7061 }
7062 } else {
7063 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
7064 movq(r13, from_addr);
7065 movq(Address(rsp, st_off), r13);
7066 }
7067 }
7068 }
7069 // Update register states
7070 reg_state[from->value()] = reg_writable;
7071 reg_state[to->value()] = reg_written;
7072 return true;
7073 }
7074
7075 // Calculate the extra stack space required for packing or unpacking inline
7076 // args and adjust the stack pointer
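// For example (a sketch, assuming 4-byte VMRegImpl::stack_slot_size and
// 16-byte StackAlignmentInBytes): args_on_stack == 3 yields
// sp_inc = align_up((3 + 2) * 4, 16) = 32 bytes, where the two extra slots
// cover the 8-byte return address.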
7077 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
7078 // Two additional slots to account for the return address
7079 int sp_inc = (args_on_stack + 2) * VMRegImpl::stack_slot_size;
7080 sp_inc = align_up(sp_inc, StackAlignmentInBytes);
7081 // Save the return address, adjust the stack (make sure it is properly
7082 // 16-byte aligned) and copy the return address to the new top of the stack.
7083 // The stack will be repaired on return (see MacroAssembler::remove_frame).
7084 assert(sp_inc > 0, "sanity");
7085 pop(r13);
7086 subptr(rsp, sp_inc);
7087 push(r13);
7088 return sp_inc;
7089 }
7090
7091 // Read all fields from an inline type buffer and store the field values in registers/stack slots.
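// Returns true once all destination registers/slots of the scalarized fields
// have been written; returns false while some destination is still marked
// reg_readonly because it holds an unconsumed argument, in which case the
// caller retries after that argument has been consumed.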
7092 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
7093 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
7094 RegState reg_state[]) {
7095 assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
7096 assert(from->is_valid(), "source must be valid");
7097 bool progress = false;
7098 #ifdef ASSERT
7099 const int start_offset = offset();
7100 #endif
7101
7102 Label L_null, L_notNull;
7103 // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
7104 Register tmp1 = r10;
7105 Register tmp2 = r13;
7106 Register fromReg = noreg;
7107 ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, -1);
7108 bool done = true;
7109 bool mark_done = true;
7110 VMReg toReg;
7111 BasicType bt;
7112 // Check if argument requires a null check
7113 bool null_check = false;
7114 VMReg nullCheckReg;
7115 while (stream.next(nullCheckReg, bt)) {
7116 if (sig->at(stream.sig_index())._offset == -1) {
7117 null_check = true;
7118 break;
7119 }
7120 }
7121 stream.reset(sig_index, to_index);
7122 while (stream.next(toReg, bt)) {
7123 assert(toReg->is_valid(), "destination must be valid");
7124 int idx = (int)toReg->value();
7125 if (reg_state[idx] == reg_readonly) {
7126 if (idx != from->value()) {
7127 mark_done = false;
7128 }
7129 done = false;
7130 continue;
7131 } else if (reg_state[idx] == reg_written) {
7132 continue;
7133 }
7134 assert(reg_state[idx] == reg_writable, "must be writable");
7135 reg_state[idx] = reg_written;
7136 progress = true;
7137
7138 if (fromReg == noreg) {
7139 if (from->is_reg()) {
7140 fromReg = from->as_Register();
7141 } else {
7142 int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
7143 movq(tmp1, Address(rsp, st_off));
7144 fromReg = tmp1;
7145 }
7146 if (null_check) {
7147 // Nullable inline type argument, emit null check
7148 testptr(fromReg, fromReg);
7149 jcc(Assembler::zero, L_null);
7150 }
7151 }
7152 int off = sig->at(stream.sig_index())._offset;
7153 if (off == -1) {
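// A signature entry with offset -1 is the IsInit marker of a nullable inline
// type argument; store 1 because the buffer was null-checked above, so the
// argument is known to be non-null here.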
7154 assert(null_check, "Missing null check");
7155 if (toReg->is_stack()) {
7156 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
7157 movq(Address(rsp, st_off), 1);
7158 } else {
7159 movq(toReg->as_Register(), 1);
7160 }
7161 continue;
7162 }
7163 assert(off > 0, "offset in object should be positive");
7164 Address fromAddr = Address(fromReg, off);
7165 if (!toReg->is_XMMRegister()) {
7166 Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
7167 if (is_reference_type(bt)) {
7168 load_heap_oop(dst, fromAddr);
7169 } else {
7170 bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
7171 load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
7172 }
7173 if (toReg->is_stack()) {
7174 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
7175 movq(Address(rsp, st_off), dst);
7176 }
7177 } else if (bt == T_DOUBLE) {
7178 movdbl(toReg->as_XMMRegister(), fromAddr);
7179 } else {
7180 assert(bt == T_FLOAT, "must be float");
7181 movflt(toReg->as_XMMRegister(), fromAddr);
7182 }
7183 }
7184 if (progress && null_check) {
7185 if (done) {
7186 jmp(L_notNull);
7187 bind(L_null);
7188 // Set IsInit field to zero to signal that the argument is null.
7189 // Also set all oop fields to zero to make the GC happy.
7190 stream.reset(sig_index, to_index);
7191 while (stream.next(toReg, bt)) {
7192 if (sig->at(stream.sig_index())._offset == -1 ||
7193 bt == T_OBJECT || bt == T_ARRAY) {
7194 if (toReg->is_stack()) {
7195 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
7196 movq(Address(rsp, st_off), 0);
7197 } else {
7198 xorq(toReg->as_Register(), toReg->as_Register());
7199 }
7200 }
7201 }
7202 bind(L_notNull);
7203 } else {
7204 bind(L_null);
7205 }
7206 }
7207
7208 sig_index = stream.sig_index();
7209 to_index = stream.regs_index();
7210
7211 if (mark_done && reg_state[from->value()] != reg_written) {
7212 // This is okay because no one else will write to that slot
7213 reg_state[from->value()] = reg_writable;
7214 }
7215 from_index--;
7216 assert(progress || (start_offset == offset()), "should not emit code");
7217 return done;
7218 }
7219
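// Pack the scalarized field values of an inline type argument back into a
// buffered instance loaded from val_array (an object array holding one buffer
// per inline type argument, indexed by vtarg_index) and leave the buffer oop
// in 'to'. Counterpart of unpack_inline_helper above.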
7220 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
7221 VMRegPair* from, int from_count, int& from_index, VMReg to,
7222 RegState reg_state[], Register val_array) {
7223 assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter");
7224 assert(to->is_valid(), "destination must be valid");
7225
7226 if (reg_state[to->value()] == reg_written) {
7227 skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
7228 return true; // Already written
7229 }
7230
7231 // TODO 8284443 Isn't it an issue if the code below uses r14 as a tmp while it contains a spilled value?
7232 // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
7233 Register val_obj_tmp = r11;
7234 Register from_reg_tmp = r14;
7235 Register tmp1 = r10;
7236 Register tmp2 = r13;
7237 Register tmp3 = rbx;
7238 Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();
7239
7240 assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);
7241
7242 if (reg_state[to->value()] == reg_readonly) {
7243 if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
7244 skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
7245 return false; // Not yet writable
7246 }
7247 val_obj = val_obj_tmp;
7248 }
7249
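// Byte offset of element vtarg_index within val_array: array header size plus
// index times the oop element size.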
7250 int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT);
7251 load_heap_oop(val_obj, Address(val_array, index));
7252
7253 ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
7254 VMReg fromReg;
7255 BasicType bt;
7256 Label L_null;
7257 while (stream.next(fromReg, bt)) {
7258 assert(fromReg->is_valid(), "source must be valid");
7259 reg_state[fromReg->value()] = reg_writable;
7260
7261 int off = sig->at(stream.sig_index())._offset;
7262 if (off == -1) {
7263 // Nullable inline type argument, emit null check
7264 Label L_notNull;
7265 if (fromReg->is_stack()) {
7266 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
7267 testb(Address(rsp, ld_off), 1);
7268 } else {
7269 testb(fromReg->as_Register(), 1);
7270 }
7271 jcc(Assembler::notZero, L_notNull);
7272 movptr(val_obj, 0);
7273 jmp(L_null);
7274 bind(L_notNull);
7275 continue;
7276 }
7277
7278 assert(off > 0, "offset in object should be positive");
7279 size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
7280
7281 Address dst(val_obj, off);
7282 if (!fromReg->is_XMMRegister()) {
7283 Register src;
7284 if (fromReg->is_stack()) {
7285 src = from_reg_tmp;
7286 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
7287 load_sized_value(src, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
7288 } else {
7289 src = fromReg->as_Register();
7290 }
7291 assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
7292 if (is_reference_type(bt)) {
7293 store_heap_oop(dst, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
7294 } else {
7295 store_sized_value(dst, src, size_in_bytes);
7296 }
7297 } else if (bt == T_DOUBLE) {
7298 movdbl(dst, fromReg->as_XMMRegister());
7299 } else {
7300 assert(bt == T_FLOAT, "must be float");
7301 movflt(dst, fromReg->as_XMMRegister());
7302 }
7303 }
7304 bind(L_null);
7305 sig_index = stream.sig_index();
7306 from_index = stream.regs_index();
7307
7308 assert(reg_state[to->value()] == reg_writable, "must have already been read");
7309 bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
7310 assert(success, "to register must be writable");
7311 return true;
7312 }
7313
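// Register reserved for spilling during inline type argument packing and
// unpacking; this is why r14 (and xmm8) must be treated with care in the
// helpers above.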
7314 VMReg MacroAssembler::spill_reg_for(VMReg reg) {
7315 return reg->is_XMMRegister() ? xmm8->as_VMReg() : r14->as_VMReg();
7316 }
7317
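// Remove the activation frame. If needs_stack_repair is set, the frame may
// have been extended for scalarized inline type arguments (see
// extend_stack_for_inline_args), so the actual stack increment was saved in
// the frame just below the saved rbp and is reloaded from there rather than
// taken from a compile-time constant.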
7318 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
7319 assert((initial_framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
7320 if (needs_stack_repair) {
7321 movq(rbp, Address(rsp, initial_framesize));
7322 // The stack increment resides just below the saved rbp
7323 addq(rsp, Address(rsp, initial_framesize - wordSize));
7324 } else {
7325 if (initial_framesize > 0) {
7326 addq(rsp, initial_framesize);
7327 }
7328 pop(rbp);
7329 }
7330 }
7331
7332 // Clearing constant-sized memory using YMM/ZMM registers.
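// For example (a sketch): cnt == 11 qwords splits into vector64_count == 1
// (one 64-byte store) plus a 3-qword tail that is handled by the masked
// stores in the switch below.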
7333 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
7334 assert(UseAVX > 2 && VM_Version::supports_avx512vl(), "");
7335 bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
7336
7337 int vector64_count = (cnt & (~0x7)) >> 3;
7338 cnt = cnt & 0x7;
7339 const int fill64_per_loop = 4;
7340 const int max_unrolled_fill64 = 8;
7341
7342 // 64 byte initialization loop.
7343 vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit);
7344 int start64 = 0;
7345 if (vector64_count > max_unrolled_fill64) {
7346 Label LOOP;
7347 Register index = rtmp;
7348
7349 start64 = vector64_count - (vector64_count % fill64_per_loop);
7350
7351 movl(index, 0);
7401 break;
7402 case 7:
7403 if (use64byteVector) {
7404 movl(rtmp, 0x7F);
7405 kmovwl(mask, rtmp);
7406 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
7407 } else {
7408 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
7409 movl(rtmp, 0x7);
7410 kmovwl(mask, rtmp);
7411 evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
7412 }
7413 break;
7414 default:
7415 fatal("Unexpected length: %d\n", cnt);
7416 break;
7417 }
7418 }
7419 }
7420
7421 void MacroAssembler::clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp,
7422 bool is_large, bool word_copy_only, KRegister mask) {
7423 // cnt - number of qwords (8-byte words).
7424 // base - start address, qword aligned.
7425 // is_large - whether the optimizer knows that cnt is larger than InitArrayShortSize
7426 assert(base==rdi, "base register must be edi for rep stos");
7427 assert(val==rax, "val register must be eax for rep stos");
7428 assert(cnt==rcx, "cnt register must be ecx for rep stos");
7429 assert(InitArrayShortSize % BytesPerLong == 0,
7430 "InitArrayShortSize should be a multiple of BytesPerLong");
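// Strategy: counts up to InitArrayShortSize/BytesPerLong use a short store
// loop; larger counts use rep stosb when UseFastStosb is set and byte stores
// are permitted, XMM/YMM stores when UseXMMForObjInit is set, and rep stos
// otherwise.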
7431
7432 Label DONE;
7433
7434 if (!is_large) {
7435 Label LOOP, LONG;
7436 cmpptr(cnt, InitArrayShortSize/BytesPerLong);
7437 jccb(Assembler::greater, LONG);
7438
7439 NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
7440
7441 decrement(cnt);
7442 jccb(Assembler::negative, DONE); // Zero length
7443
7444 // Use individual pointer-sized stores for small counts:
7445 BIND(LOOP);
7446 movptr(Address(base, cnt, Address::times_ptr), val);
7447 decrement(cnt);
7448 jccb(Assembler::greaterEqual, LOOP);
7449 jmpb(DONE);
7450
7451 BIND(LONG);
7452 }
7453
7454 // Use longer rep-prefixed ops for non-small counts:
7455 if (UseFastStosb && !word_copy_only) {
7456 shlptr(cnt, 3); // convert to number of bytes
7457 rep_stosb();
7458 } else if (UseXMMForObjInit) {
7459 xmm_clear_mem(base, cnt, val, xtmp, mask);
7460 } else {
7461 NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
7462 rep_stos();
7463 }
7464
7465 BIND(DONE);
7466 }
7467
7468 #endif //COMPILER2_OR_JVMCI
7469
7470
7471 void MacroAssembler::generate_fill(BasicType t, bool aligned,
7472 Register to, Register value, Register count,
7473 Register rtmp, XMMRegister xtmp) {
7474 ShortBranchVerifier sbv(this);
7475 assert_different_registers(to, value, count, rtmp);
7476 Label L_exit;
7477 Label L_fill_2_bytes, L_fill_4_bytes;
7478
7479 #if defined(COMPILER2) && defined(_LP64)
11560
11561 // Load top.
11562 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
11563
11564 // Check if the lock-stack is full.
11565 cmpl(top, LockStack::end_offset());
11566 jcc(Assembler::greaterEqual, slow);
11567
11568 // Check for recursion.
11569 cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
11570 jcc(Assembler::equal, push);
11571
11572 // Check header for monitor (0b10).
11573 testptr(reg_rax, markWord::monitor_value);
11574 jcc(Assembler::notZero, slow);
11575
11576 // Try to lock. Transition lock bits 0b01 => 0b00
11577 movptr(tmp, reg_rax);
11578 andptr(tmp, ~(int32_t)markWord::unlocked_value);
11579 orptr(reg_rax, markWord::unlocked_value);
11580 if (EnableValhalla) {
11581 // Mask the inline_type bit so that we go to the slow path if the object is an inline type
11582 andptr(reg_rax, ~((int) markWord::inline_type_bit_in_place));
11583 }
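// cmpxchgptr compares rax (expected value: the mark word with lock bits still
// 0b01, i.e. unlocked) against the object's mark word and, on a match,
// installs tmp (the same mark word with lock bits cleared to 0b00, i.e.
// fast-locked).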
11584 lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
11585 jcc(Assembler::notEqual, slow);
11586
11587 // Restore top; the CAS above clobbered the register.
11588 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
11589
11590 bind(push);
11591 // After successful lock, push object on lock-stack.
11592 movptr(Address(thread, top), obj);
11593 incrementl(top, oopSize);
11594 movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
11595 }
11596
11597 // Implements lightweight-unlocking.
11598 //
11599 // obj: the object to be unlocked
11600 // reg_rax: rax
11601 // thread: the thread
11602 // tmp: a temporary register
11603 void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {