1333 if (reachable(entry)) {
1334 Assembler::call_literal(entry.target(), entry.rspec());
1335 } else {
1336 lea(rscratch, entry);
1337 Assembler::call(rscratch);
1338 }
1339 }
1340
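// Editor's note (sketch of the mechanism, not upstream wording): rax doubles as the inline-cache
// data register. Universe::non_oop_word() is a sentinel the IC resolution code recognizes as
// "not yet bound", so the immediate can be patched with the real CompiledICData once the call
// site is resolved.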
1341 void MacroAssembler::ic_call(address entry, jint method_index) {
1342 RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
1343 #ifdef _LP64
1344 // Needs full 64-bit immediate for later patching.
1345 mov64(rax, (int64_t)Universe::non_oop_word());
1346 #else
1347 movptr(rax, (intptr_t)Universe::non_oop_word());
1348 #endif
1349 call(AddressLiteral(entry, rh));
1350 }
1351
1352 int MacroAssembler::ic_check_size() {
1353 return LP64_ONLY(14) NOT_LP64(12);
1354 }
1355
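// Emits the inline-cache check that forms the unverified entry point (UEP): the receiver's klass
// is loaded and compared against the speculated klass recorded in the CompiledICData passed in
// rax; on a mismatch control transfers to the IC-miss stub. Returns the offset of the UEP.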
1356 int MacroAssembler::ic_check(int end_alignment) {
1357 Register receiver = LP64_ONLY(j_rarg0) NOT_LP64(rcx);
1358 Register data = rax;
1359 Register temp = LP64_ONLY(rscratch1) NOT_LP64(rbx);
1360
1361 // The unverified entry point (UEP) of a code blob ensures that the verified entry point (VEP) is
1362 // properly aligned. The padding needed for that alignment is placed before the inline cache check,
1363 // so no nop instructions have to be executed when dispatching through the UEP, while the VEP still
1364 // ends up aligned appropriately. That is why we align before the inline cache check here, and not after it.
1365 align(end_alignment, offset() + ic_check_size());
1366
1367 int uep_offset = offset();
1368
1369 if (UseCompressedClassPointers) {
1370 movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
1371 cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
1372 } else {
1373 movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
1374 cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset()));
1375 }
1376
1377 // if inline cache check fails, then jump to runtime routine
1378 jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1379 assert((offset() % end_alignment) == 0, "Misaligned verified entry point");
1380
1381 return uep_offset;
1382 }
1383
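// The static call stub is a tiny out-of-line trampoline: a Method* load into rbx (initially null,
// i.e. unresolved) followed by a jump whose target is patched once the callee is resolved.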
1384 void MacroAssembler::emit_static_call_stub() {
1385 // Static stub relocation also tags the Method* in the code-stream.
1386 mov_metadata(rbx, (Metadata*) nullptr); // Method is zapped till fixup time.
1387 // This is recognized as unresolved by relocs/nativeinst/ic code.
1388 jump(RuntimeAddress(pc()));
1389 }
1390
1391 // Implementation of call_VM versions
1392
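// Editor's note: the call(C)/jmp(E) pattern below gives the VM transition a real call site. The
// near call pushes a return address, the out-of-line block bound at C (elided in this excerpt)
// performs the actual VM call and returns, and execution then continues at E.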
1393 void MacroAssembler::call_VM(Register oop_result,
1394 address entry_point,
1395 bool check_exceptions) {
1396 Label C, E;
1397 call(C, relocInfo::none);
1398 jmp(E);
1399
5652
5653 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
5654 // get mirror
5655 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
5656 load_method_holder(mirror, method);
5657 movptr(mirror, Address(mirror, mirror_offset));
5658 resolve_oop_handle(mirror, tmp);
5659 }
5660
5661 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
5662 load_method_holder(rresult, rmethod);
5663 movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
5664 }
5665
5666 void MacroAssembler::load_method_holder(Register holder, Register method) {
5667 movptr(holder, Address(method, Method::const_offset())); // ConstMethod*
5668 movptr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
5669 movptr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
5670 }
5671
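// With UseCompressedClassPointers the klass field holds a 32-bit narrow klass that must be
// decoded (shift and/or base add) into a full Klass*; otherwise it is a plain pointer load.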
5672 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
5673 assert_different_registers(src, tmp);
5674 assert_different_registers(dst, tmp);
5675 #ifdef _LP64
5676 if (UseCompressedClassPointers) {
5677 movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5678 decode_klass_not_null(dst, tmp);
5679 } else
5680 #endif
5681 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5682 }
5683
5684 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
5685 assert_different_registers(src, tmp);
5686 assert_different_registers(dst, tmp);
5687 #ifdef _LP64
5688 if (UseCompressedClassPointers) {
5689 encode_klass_not_null(src, tmp);
5690 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
5691 } else
5692 #endif
5693 movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
5694 }
5695
5696 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
5697 Register tmp1, Register thread_tmp) {
5698 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5699 decorators = AccessInternal::decorator_fixup(decorators, type);
5700 bool as_raw = (decorators & AS_RAW) != 0;
5701 if (as_raw) {
5702 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
5703 } else {
5704 bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
5705 }
5706 }
5707
5708 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
5709 Register tmp1, Register tmp2, Register tmp3) {
5710 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5711 decorators = AccessInternal::decorator_fixup(decorators, type);
5712 bool as_raw = (decorators & AS_RAW) != 0;
5713 if (as_raw) {
5714 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
5715 } else {
5716 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
5717 }
5718 }
5719
5720 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
5721 Register thread_tmp, DecoratorSet decorators) {
5722 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
5723 }
5724
5725 // Doesn't do verification, generates fixed size code
5726 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
5727 Register thread_tmp, DecoratorSet decorators) {
5728 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
5729 }
5730
5731 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
5732 Register tmp2, Register tmp3, DecoratorSet decorators) {
5733 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
5734 }
5735
5736 // Used for storing nulls.
5737 void MacroAssembler::store_heap_oop_null(Address dst) {
5738 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
5739 }
5740
5741 #ifdef _LP64
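// With compressed class pointers the 4 bytes following the narrow-klass field are unused padding
// (the "klass gap"); callers use this to initialize that gap, typically with zero.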
5742 void MacroAssembler::store_klass_gap(Register dst, Register src) {
5743 if (UseCompressedClassPointers) {
5744 // Store to klass gap in destination
5745 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
5746 }
5747 }
5748
5749 #ifdef ASSERT
5750 void MacroAssembler::verify_heapbase(const char* msg) {
5751 assert (UseCompressedOops, "should be compressed");
5752 assert (Universe::heap() != nullptr, "java heap should be initialized");
5753 if (CheckCompressedOops) {
5754 Label ok;
5755 ExternalAddress src2(CompressedOops::ptrs_base_addr());
5756 const bool is_src2_reachable = reachable(src2);
5757 if (!is_src2_reachable) {
5758 push(rscratch1); // cmpptr trashes rscratch1
5759 }
5760 cmpptr(r12_heapbase, src2, rscratch1);
5761 jcc(Assembler::equal, ok);
5762 STOP(msg);
5887 shlq(dst, LogMinObjAlignmentInBytes);
5888 if (CompressedOops::base() != nullptr) {
5889 addq(dst, r12_heapbase);
5890 }
5891 }
5892 } else {
5893 assert (CompressedOops::base() == nullptr, "sanity");
5894 if (dst != src) {
5895 movq(dst, src);
5896 }
5897 }
5898 }
5899
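// Encoding a Klass* is the inverse of decoding: subtract the encoding base (if any), then shift
// right by the encoding shift.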
5900 void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
5901 assert_different_registers(r, tmp);
5902 if (CompressedKlassPointers::base() != nullptr) {
5903 mov64(tmp, (int64_t)CompressedKlassPointers::base());
5904 subq(r, tmp);
5905 }
5906 if (CompressedKlassPointers::shift() != 0) {
5907 assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
5908 shrq(r, LogKlassAlignmentInBytes);
5909 }
5910 }
5911
5912 void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) {
5913 assert_different_registers(src, dst);
5914 if (CompressedKlassPointers::base() != nullptr) {
5915 mov64(dst, -(int64_t)CompressedKlassPointers::base());
5916 addq(dst, src);
5917 } else {
5918 movptr(dst, src);
5919 }
5920 if (CompressedKlassPointers::shift() != 0) {
5921 assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
5922 shrq(dst, LogKlassAlignmentInBytes);
5923 }
5924 }
5925
5926 void MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
5927 assert_different_registers(r, tmp);
5928 // Note: this clobbers the condition flags.
5929 assert(UseCompressedClassPointers, "should only be used for compressed headers");
5930 // Cannot assert, unverified entry point counts instructions (see .ad file)
5931 // vtableStubs also counts instructions in pd_code_size_limit.
5932 // Also do not verify_oop as this is called by verify_oop.
5933 if (CompressedKlassPointers::shift() != 0) {
5934 assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
5935 shlq(r, LogKlassAlignmentInBytes);
5936 }
5937 if (CompressedKlassPointers::base() != nullptr) {
5938 mov64(tmp, (int64_t)CompressedKlassPointers::base());
5939 addq(r, tmp);
5940 }
5941 }
5942
5943 void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) {
5944 assert_different_registers(src, dst);
5945 // Note: this clobbers the condition flags.
5946 assert (UseCompressedClassPointers, "should only be used for compressed headers");
5947 // Cannot assert, unverified entry point counts instructions (see .ad file)
5948 // vtableStubs also counts instructions in pd_code_size_limit.
5949 // Also do not verify_oop as this is called by verify_oop.
5950
5951 if (CompressedKlassPointers::base() == nullptr &&
5952 CompressedKlassPointers::shift() == 0) {
5953 // The best case scenario is that there is no base or shift. Then it is already
5954 // a pointer that needs nothing but a register rename.
5955 movl(dst, src);
5956 } else {
5957 if (CompressedKlassPointers::base() != nullptr) {
5958 mov64(dst, (int64_t)CompressedKlassPointers::base());
5959 } else {
5960 xorq(dst, dst);
5961 }
5962 if (CompressedKlassPointers::shift() != 0) {
5963 assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
5964 assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
5965 leaq(dst, Address(dst, src, Address::times_8, 0));
5966 } else {
5967 addq(dst, src);
5968 }
5969 }
5970 }
5971
5972 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
5973 assert (UseCompressedOops, "should only be used for compressed headers");
5974 assert (Universe::heap() != nullptr, "java heap should be initialized");
5975 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5976 int oop_index = oop_recorder()->find_index(obj);
5977 RelocationHolder rspec = oop_Relocation::spec(oop_index);
5978 mov_narrow_oop(dst, oop_index, rspec);
5979 }
5980
5981 void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
5982 assert (UseCompressedOops, "should only be used for compressed headers");
5983 assert (Universe::heap() != nullptr, "java heap should be initialized");
5984 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5985 int oop_index = oop_recorder()->find_index(obj);
5986 RelocationHolder rspec = oop_Relocation::spec(oop_index);
5987 mov_narrow_oop(dst, oop_index, rspec);
10254 if (bias == 0) {
10255 testptr(sp, 2 * wordSize - 1);
10256 } else {
10257 // lea(tmp, Address(rsp, bias));
10258 mov(tmp, sp);
10259 addptr(tmp, bias);
10260 testptr(tmp, 2 * wordSize - 1);
10261 }
10262 jcc(Assembler::equal, L_stack_ok);
10263 block_comment(msg);
10264 stop(msg);
10265 bind(L_stack_ok);
10266 }
10267
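// markWord lock bits used below: 0b01 = unlocked, 0b00 = lightweight-locked, 0b10 = inflated
// (monitor). Lightweight locking CASes the low bits from 0b01 to 0b00 and records ownership by
// pushing the object on the owning thread's lock-stack.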
10268 // Implements lightweight-locking.
10269 //
10270 // obj: the object to be locked
10271 // reg_rax: rax
10272 // thread: the thread which attempts to lock obj
10273 // tmp: a temporary register
10274 void MacroAssembler::lightweight_lock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
10275 assert(reg_rax == rax, "");
10276 assert_different_registers(obj, reg_rax, thread, tmp);
10277
10278 Label push;
10279 const Register top = tmp;
10280
10281 // Preload the markWord. It is important that this is the first
10282 // instruction emitted as it is part of C1's null check semantics.
10283 movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
10284
10285 // Load top.
10286 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10287
10288 // Check if the lock-stack is full.
10289 cmpl(top, LockStack::end_offset());
10290 jcc(Assembler::greaterEqual, slow);
10291
10292 // Check for recursion.
10293 cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
10294 jcc(Assembler::equal, push);
10295
10296 // Check header for monitor (0b10).
10297 testptr(reg_rax, markWord::monitor_value);
10298 jcc(Assembler::notZero, slow);
10299
10300 // Try to lock. Transition lock bits 0b01 => 0b00
10301 movptr(tmp, reg_rax);
10302 andptr(tmp, ~(int32_t)markWord::unlocked_value);
10303 orptr(reg_rax, markWord::unlocked_value);
10304 lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
10305 jcc(Assembler::notEqual, slow);
10306
10307 // Restore top, CAS clobbers register.
10308 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10309
10310 bind(push);
10311 // After successful lock, push object on lock-stack.
10312 movptr(Address(thread, top), obj);
10313 incrementl(top, oopSize);
10314 movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
10315 }
10316
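// Unlocking pops the top lock-stack entry; only for the outermost (non-recursive) entry are the
// mark word lock bits CASed back from 0b00 to 0b01.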
10317 // Implements lightweight-unlocking.
10318 //
10319 // obj: the object to be unlocked
10320 // reg_rax: rax
10321 // thread: the thread
10322 // tmp: a temporary register
10323 //
10324 // x86_32 Note: reg_rax and thread may alias each other due to limited register
10325 // availability.
10326 void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
10327 assert(reg_rax == rax, "");
10328 assert_different_registers(obj, reg_rax, tmp);
10329 LP64_ONLY(assert_different_registers(obj, reg_rax, thread, tmp);)
10330
10331 Label unlocked, push_and_slow;
10332 const Register top = tmp;
10333
10334 // Check if obj is top of lock-stack.
10335 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10336 cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
10337 jcc(Assembler::notEqual, slow);
10338
10339 // Pop lock-stack.
10340 DEBUG_ONLY(movptr(Address(thread, top, Address::times_1, -oopSize), 0);)
10341 subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
10342
10343 // Check if recursive.
10344 cmpptr(obj, Address(thread, top, Address::times_1, -2 * oopSize));
10345 jcc(Assembler::equal, unlocked);
10346
10347 // Not recursive. Check header for monitor (0b10).
10348 movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
10349 testptr(reg_rax, markWord::monitor_value);
10350 jcc(Assembler::notZero, push_and_slow);
10351
10352 #ifdef ASSERT
10353 // Check header not unlocked (0b01).
10354 Label not_unlocked;
10355 testptr(reg_rax, markWord::unlocked_value);
10356 jcc(Assembler::zero, not_unlocked);
10357 stop("lightweight_unlock already unlocked");
10358 bind(not_unlocked);
10359 #endif
10360
10361 // Try to unlock. Transition lock bits 0b00 => 0b01
10362 movptr(tmp, reg_rax);
10363 orptr(tmp, markWord::unlocked_value);
10364 lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
10365 jcc(Assembler::equal, unlocked);
10366
10367 bind(push_and_slow);
10368 // Restore lock-stack and handle the unlock in runtime.
10369 if (thread == reg_rax) {
10370 // On x86_32 we may lose the thread.
10371 get_thread(thread);
10372 }
10373 #ifdef ASSERT
10374 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10375 movptr(Address(thread, top), obj);
10376 #endif
10377 addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
10378 jmp(slow);
10379
10380 bind(unlocked);
10381 }
10381 }
1333 if (reachable(entry)) {
1334 Assembler::call_literal(entry.target(), entry.rspec());
1335 } else {
1336 lea(rscratch, entry);
1337 Assembler::call(rscratch);
1338 }
1339 }
1340
1341 void MacroAssembler::ic_call(address entry, jint method_index) {
1342 RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
1343 #ifdef _LP64
1344 // Needs full 64-bit immediate for later patching.
1345 mov64(rax, (int64_t)Universe::non_oop_word());
1346 #else
1347 movptr(rax, (intptr_t)Universe::non_oop_word());
1348 #endif
1349 call(AddressLiteral(entry, rh));
1350 }
1351
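// With compact object headers the klass check loads and shifts the 8-byte mark word instead of a
// 32-bit narrow klass field, so the unverified entry point is a few bytes larger.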
1352 int MacroAssembler::ic_check_size() {
1353 return
1354 LP64_ONLY(UseCompactObjectHeaders ? 17 : 14) NOT_LP64(12);
1355 }
1356
1357 int MacroAssembler::ic_check(int end_alignment) {
1358 Register receiver = LP64_ONLY(j_rarg0) NOT_LP64(rcx);
1359 Register data = rax;
1360 Register temp = LP64_ONLY(rscratch1) NOT_LP64(rbx);
1361
1362 // The unverified entry point (UEP) of a code blob ensures that the verified entry point (VEP) is
1363 // properly aligned. The padding needed for that alignment is placed before the inline cache check,
1364 // so no nop instructions have to be executed when dispatching through the UEP, while the VEP still
1365 // ends up aligned appropriately. That is why we align before the inline cache check here, and not after it.
1366 align(end_alignment, offset() + ic_check_size());
1367
1368 int uep_offset = offset();
1369
1370 #ifdef _LP64
1371 if (UseCompactObjectHeaders) {
1372 load_nklass_compact(temp, receiver);
1373 cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
1374 } else
1375 #endif
1376 if (UseCompressedClassPointers) {
1377 movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
1378 cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
1379 } else {
1380 movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
1381 cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset()));
1382 }
1383
1384 // if inline cache check fails, then jump to runtime routine
1385 jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1386 assert((offset() % end_alignment) == 0, "Misaligned verified entry point (%d, %d, %d)", uep_offset, offset(), end_alignment);
1387
1388 return uep_offset;
1389 }
1390
1391 void MacroAssembler::emit_static_call_stub() {
1392 // Static stub relocation also tags the Method* in the code-stream.
1393 mov_metadata(rbx, (Metadata*) nullptr); // Method is zapped till fixup time.
1394 // This is recognized as unresolved by relocs/nativeinst/ic code.
1395 jump(RuntimeAddress(pc()));
1396 }
1397
1398 // Implementation of call_VM versions
1399
1400 void MacroAssembler::call_VM(Register oop_result,
1401 address entry_point,
1402 bool check_exceptions) {
1403 Label C, E;
1404 call(C, relocInfo::none);
1405 jmp(E);
1406
5659
5660 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
5661 // get mirror
5662 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
5663 load_method_holder(mirror, method);
5664 movptr(mirror, Address(mirror, mirror_offset));
5665 resolve_oop_handle(mirror, tmp);
5666 }
5667
5668 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
5669 load_method_holder(rresult, rmethod);
5670 movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
5671 }
5672
5673 void MacroAssembler::load_method_holder(Register holder, Register method) {
5674 movptr(holder, Address(method, Method::const_offset())); // ConstMethod*
5675 movptr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
5676 movptr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
5677 }
5678
5679 #ifdef _LP64
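// With compact object headers there is no separate klass field: the narrow klass lives in the
// upper bits of the mark word and is extracted by shifting right by markWord::klass_shift.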
5680 void MacroAssembler::load_nklass_compact(Register dst, Register src) {
5681 assert(UseCompactObjectHeaders, "expect compact object headers");
5682 movq(dst, Address(src, oopDesc::mark_offset_in_bytes()));
5683 shrq(dst, markWord::klass_shift);
5684 }
5685 #endif
5686
5687 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
5688 BLOCK_COMMENT("load_klass");
5689 assert_different_registers(src, tmp);
5690 assert_different_registers(dst, tmp);
5691 #ifdef _LP64
5692 if (UseCompactObjectHeaders) {
5693 load_nklass_compact(dst, src);
5694 decode_klass_not_null(dst, tmp);
5695 } else if (UseCompressedClassPointers) {
5696 movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5697 decode_klass_not_null(dst, tmp);
5698 } else
5699 #endif
5700 {
5701 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5702 }
5703 }
5704
5705 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
5706 assert(!UseCompactObjectHeaders, "not with compact headers");
5707 assert_different_registers(src, tmp);
5708 assert_different_registers(dst, tmp);
5709 #ifdef _LP64
5710 if (UseCompressedClassPointers) {
5711 encode_klass_not_null(src, tmp);
5712 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
5713 } else
5714 #endif
5715 movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
5716 }
5717
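// Compares the (narrow or full) klass already held in 'klass' against the klass of 'obj'; with
// compact headers the narrow klass is first extracted from obj's mark word into tmp.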
5718 void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
5719 BLOCK_COMMENT("cmp_klass 1");
5720 #ifdef _LP64
5721 if (UseCompactObjectHeaders) {
5722 load_nklass_compact(tmp, obj);
5723 cmpl(klass, tmp);
5724 } else if (UseCompressedClassPointers) {
5725 cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
5726 } else
5727 #endif
5728 {
5729 cmpptr(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
5730 }
5731 }
5732
5733 void MacroAssembler::cmp_klass(Register src, Register dst, Register tmp1, Register tmp2) {
5734 BLOCK_COMMENT("cmp_klass 2");
5735 #ifdef _LP64
5736 if (UseCompactObjectHeaders) {
5737 assert(tmp2 != noreg, "need tmp2");
5738 assert_different_registers(src, dst, tmp1, tmp2);
5739 load_nklass_compact(tmp1, src);
5740 load_nklass_compact(tmp2, dst);
5741 cmpl(tmp1, tmp2);
5742 } else if (UseCompressedClassPointers) {
5743 movl(tmp1, Address(src, oopDesc::klass_offset_in_bytes()));
5744 cmpl(tmp1, Address(dst, oopDesc::klass_offset_in_bytes()));
5745 } else
5746 #endif
5747 {
5748 movptr(tmp1, Address(src, oopDesc::klass_offset_in_bytes()));
5749 cmpptr(tmp1, Address(dst, oopDesc::klass_offset_in_bytes()));
5750 }
5751 }
5752
5753 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
5754 Register tmp1, Register thread_tmp) {
5755 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5756 decorators = AccessInternal::decorator_fixup(decorators, type);
5757 bool as_raw = (decorators & AS_RAW) != 0;
5758 if (as_raw) {
5759 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
5760 } else {
5761 bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
5762 }
5763 }
5764
5765 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
5766 Register tmp1, Register tmp2, Register tmp3) {
5767 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5768 decorators = AccessInternal::decorator_fixup(decorators, type);
5769 bool as_raw = (decorators & AS_RAW) != 0;
5770 if (as_raw) {
5771 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
5772 } else {
5773 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
5774 }
5775 }
5776
5777 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
5778 Register thread_tmp, DecoratorSet decorators) {
5779 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
5780 }
5781
5782 // Doesn't do verification, generates fixed size code
5783 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
5784 Register thread_tmp, DecoratorSet decorators) {
5785 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
5786 }
5787
5788 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
5789 Register tmp2, Register tmp3, DecoratorSet decorators) {
5790 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
5791 }
5792
5793 // Used for storing nulls.
5794 void MacroAssembler::store_heap_oop_null(Address dst) {
5795 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
5796 }
5797
5798 #ifdef _LP64
5799 void MacroAssembler::store_klass_gap(Register dst, Register src) {
5800 assert(!UseCompactObjectHeaders, "Don't use with compact headers");
5801 if (UseCompressedClassPointers) {
5802 // Store to klass gap in destination
5803 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
5804 }
5805 }
5806
5807 #ifdef ASSERT
5808 void MacroAssembler::verify_heapbase(const char* msg) {
5809 assert (UseCompressedOops, "should be compressed");
5810 assert (Universe::heap() != nullptr, "java heap should be initialized");
5811 if (CheckCompressedOops) {
5812 Label ok;
5813 ExternalAddress src2(CompressedOops::ptrs_base_addr());
5814 const bool is_src2_reachable = reachable(src2);
5815 if (!is_src2_reachable) {
5816 push(rscratch1); // cmpptr trashes rscratch1
5817 }
5818 cmpptr(r12_heapbase, src2, rscratch1);
5819 jcc(Assembler::equal, ok);
5820 STOP(msg);
5945 shlq(dst, LogMinObjAlignmentInBytes);
5946 if (CompressedOops::base() != nullptr) {
5947 addq(dst, r12_heapbase);
5948 }
5949 }
5950 } else {
5951 assert (CompressedOops::base() == nullptr, "sanity");
5952 if (dst != src) {
5953 movq(dst, src);
5954 }
5955 }
5956 }
5957
5958 void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
5959 assert_different_registers(r, tmp);
5960 if (CompressedKlassPointers::base() != nullptr) {
5961 mov64(tmp, (int64_t)CompressedKlassPointers::base());
5962 subq(r, tmp);
5963 }
5964 if (CompressedKlassPointers::shift() != 0) {
5965 shrq(r, CompressedKlassPointers::shift());
5966 }
5967 }
5968
5969 void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) {
5970 assert_different_registers(src, dst);
5971 if (CompressedKlassPointers::base() != nullptr) {
5972 mov64(dst, -(int64_t)CompressedKlassPointers::base());
5973 addq(dst, src);
5974 } else {
5975 movptr(dst, src);
5976 }
5977 if (CompressedKlassPointers::shift() != 0) {
5978 shrq(dst, CompressedKlassPointers::shift());
5979 }
5980 }
5981
5982 void MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
5983 assert_different_registers(r, tmp);
5984 // Note: this clobbers the condition flags.
5985 assert(UseCompressedClassPointers, "should only be used for compressed headers");
5986 // Cannot assert, unverified entry point counts instructions (see .ad file)
5987 // vtableStubs also counts instructions in pd_code_size_limit.
5988 // Also do not verify_oop as this is called by verify_oop.
5989 if (CompressedKlassPointers::shift() != 0) {
5990 shlq(r, CompressedKlassPointers::shift());
5991 }
5992 if (CompressedKlassPointers::base() != nullptr) {
5993 mov64(tmp, (int64_t)CompressedKlassPointers::base());
5994 addq(r, tmp);
5995 }
5996 }
5997
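// Two decode strategies: for shifts that fit an x86 scale factor (at most 3) the base add and the
// shift fold into a single leaq; for larger shifts (possible with compact headers) the base is
// pre-shifted right, added to the narrow klass, and the sum is shifted left.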
5998 void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) {
5999 assert_different_registers(src, dst);
6000 // Note: this clobbers the condition flags.
6001 assert (UseCompressedClassPointers, "should only be used for compressed headers");
6002 // Cannot assert, unverified entry point counts instructions (see .ad file)
6003 // vtableStubs also counts instructions in pd_code_size_limit.
6004 // Also do not verify_oop as this is called by verify_oop.
6005
6006 if (CompressedKlassPointers::base() == nullptr &&
6007 CompressedKlassPointers::shift() == 0) {
6008 // The best case scenario is that there is no base or shift. Then it is already
6009 // a pointer that needs nothing but a register rename.
6010 movl(dst, src);
6011 } else {
6012 if (CompressedKlassPointers::shift() <= Address::times_8) {
6013 if (CompressedKlassPointers::base() != nullptr) {
6014 mov64(dst, (int64_t)CompressedKlassPointers::base());
6015 } else {
6016 xorq(dst, dst);
6017 }
6018 if (CompressedKlassPointers::shift() != 0) {
6019 assert(CompressedKlassPointers::shift() == Address::times_8, "klass not aligned on 64bits?");
6020 leaq(dst, Address(dst, src, Address::times_8, 0));
6021 } else {
6022 addq(dst, src);
6023 }
6024 } else {
6025 if (CompressedKlassPointers::base() != nullptr) {
6026 const uint64_t base_right_shifted =
6027 (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
6028 mov64(dst, base_right_shifted);
6029 } else {
6030 xorq(dst, dst);
6031 }
6032 addq(dst, src);
6033 shlq(dst, CompressedKlassPointers::shift());
6034 }
6035 }
6036 }
6037
6038 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
6039 assert (UseCompressedOops, "should only be used for compressed headers");
6040 assert (Universe::heap() != nullptr, "java heap should be initialized");
6041 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
6042 int oop_index = oop_recorder()->find_index(obj);
6043 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6044 mov_narrow_oop(dst, oop_index, rspec);
6045 }
6046
6047 void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
6048 assert (UseCompressedOops, "should only be used for compressed headers");
6049 assert (Universe::heap() != nullptr, "java heap should be initialized");
6050 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
6051 int oop_index = oop_recorder()->find_index(obj);
6052 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6053 mov_narrow_oop(dst, oop_index, rspec);
10320 if (bias == 0) {
10321 testptr(sp, 2 * wordSize - 1);
10322 } else {
10323 // lea(tmp, Address(rsp, bias));
10324 mov(tmp, sp);
10325 addptr(tmp, bias);
10326 testptr(tmp, 2 * wordSize - 1);
10327 }
10328 jcc(Assembler::equal, L_stack_ok);
10329 block_comment(msg);
10330 stop(msg);
10331 bind(L_stack_ok);
10332 }
10333
10334 // Implements lightweight-locking.
10335 //
10336 // obj: the object to be locked
10337 // reg_rax: rax
10338 // thread: the thread which attempts to lock obj
10339 // tmp: a temporary register
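// basic_lock: the on-stack BasicLock slot for this monitor enter; when UseObjectMonitorTable is
//             enabled its cached ObjectMonitor* field is cleared before the fast-lock attempt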
10340 void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
10341 assert(reg_rax == rax, "");
10342 assert_different_registers(basic_lock, obj, reg_rax, thread, tmp);
10343
10344 Label push;
10345 const Register top = tmp;
10346
10347 // Preload the markWord. It is important that this is the first
10348 // instruction emitted as it is part of C1's null check semantics.
10349 movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
10350
10351 if (UseObjectMonitorTable) {
10352 // Clear cache in case fast locking succeeds.
10353 movptr(Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))), 0);
10354 }
10355
10356 // Load top.
10357 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10358
10359 // Check if the lock-stack is full.
10360 cmpl(top, LockStack::end_offset());
10361 jcc(Assembler::greaterEqual, slow);
10362
10363 // Check for recursion.
10364 cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
10365 jcc(Assembler::equal, push);
10366
10367 // Check header for monitor (0b10).
10368 testptr(reg_rax, markWord::monitor_value);
10369 jcc(Assembler::notZero, slow);
10370
10371 // Try to lock. Transition lock bits 0b01 => 0b00
10372 movptr(tmp, reg_rax);
10373 andptr(tmp, ~(int32_t)markWord::unlocked_value);
10374 orptr(reg_rax, markWord::unlocked_value);
10375 lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
10376 jcc(Assembler::notEqual, slow);
10377
10378 // Restore top, CAS clobbers register.
10379 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10380
10381 bind(push);
10382 // After successful lock, push object on lock-stack.
10383 movptr(Address(thread, top), obj);
10384 incrementl(top, oopSize);
10385 movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
10386 }
10387
10388 // Implements lightweight-unlocking.
10389 //
10390 // obj: the object to be unlocked
10391 // reg_rax: rax
10392 // thread: the thread
10393 // tmp: a temporary register
10394 void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
10395 assert(reg_rax == rax, "");
10396 assert_different_registers(obj, reg_rax, thread, tmp);
10397
10398 Label unlocked, push_and_slow;
10399 const Register top = tmp;
10400
10401 // Check if obj is top of lock-stack.
10402 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10403 cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
10404 jcc(Assembler::notEqual, slow);
10405
10406 // Pop lock-stack.
10407 DEBUG_ONLY(movptr(Address(thread, top, Address::times_1, -oopSize), 0);)
10408 subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
10409
10410 // Check if recursive.
10411 cmpptr(obj, Address(thread, top, Address::times_1, -2 * oopSize));
10412 jcc(Assembler::equal, unlocked);
10413
10414 // Not recursive. Check header for monitor (0b10).
10415 movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
10416 testptr(reg_rax, markWord::monitor_value);
10417 jcc(Assembler::notZero, push_and_slow);
10418
10419 #ifdef ASSERT
10420 // Check header not unlocked (0b01).
10421 Label not_unlocked;
10422 testptr(reg_rax, markWord::unlocked_value);
10423 jcc(Assembler::zero, not_unlocked);
10424 stop("lightweight_unlock already unlocked");
10425 bind(not_unlocked);
10426 #endif
10427
10428 // Try to unlock. Transition lock bits 0b00 => 0b01
10429 movptr(tmp, reg_rax);
10430 orptr(tmp, markWord::unlocked_value);
10431 lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
10432 jcc(Assembler::equal, unlocked);
10433
10434 bind(push_and_slow);
10435 // Restore lock-stack and handle the unlock in runtime.
10436 #ifdef ASSERT
10437 movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10438 movptr(Address(thread, top), obj);
10439 #endif
10440 addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
10441 jmp(slow);
10442
10443 bind(unlocked);
10444 }