9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "asm/assembler.hpp"
27 #include "asm/assembler.inline.hpp"
28 #include "ci/ciEnv.hpp"
29 #include "code/compiledIC.hpp"
30 #include "compiler/compileTask.hpp"
31 #include "compiler/disassembler.hpp"
32 #include "compiler/oopMap.hpp"
33 #include "gc/shared/barrierSet.hpp"
34 #include "gc/shared/barrierSetAssembler.hpp"
35 #include "gc/shared/cardTableBarrierSet.hpp"
36 #include "gc/shared/cardTable.hpp"
37 #include "gc/shared/collectedHeap.hpp"
38 #include "gc/shared/tlab_globals.hpp"
39 #include "interpreter/bytecodeHistogram.hpp"
40 #include "interpreter/interpreter.hpp"
41 #include "interpreter/interpreterRuntime.hpp"
42 #include "jvm.h"
43 #include "memory/resourceArea.hpp"
44 #include "memory/universe.hpp"
45 #include "nativeInst_aarch64.hpp"
46 #include "oops/accessDecorators.hpp"
47 #include "oops/compressedKlass.inline.hpp"
48 #include "oops/compressedOops.inline.hpp"
304 uint32_t insn2 = insn_at(insn_addr, 1);
305 uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
306 Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
307 guarantee(((dest >> size) << size) == dest, "misaligned target");
308 return 2;
309 }
  // Patch the add that follows an adrp: fill the low 12 bits of the
  // target address into the add's imm12 field (bits 21..10 of the
  // second instruction). The adrp itself is patched elsewhere.
  static int adrpAdd(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;   // page offset: low 12 bits of the target
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;                       // sequence is two instructions long
  }
  // Patch an adrp; movk pair: the movk (second instruction) carries
  // bits 47..32 of the target in its imm16 field (bits 20..5).
  // `target` is then rewritten to the address the pair actually
  // yields: the low 32 bits of the requested target combined with
  // bits 47..32 of the instruction's own address.
  // NOTE(review): presumably the high bits must match insn_addr's
  // because adrp has +-4GB reach — confirm against the emitter.
  static int adrpMovk(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  // Patch a 48-bit move-wide constant: movz; movk; movk (see movptr()).
  // Each instruction holds 16 bits of the target in its imm16 field
  // (bits 20..5).
  static int immediate(address insn_addr, address &target) {
    // First instruction must be a 64-bit movz with hw == 0.
    assert(Instruction_aarch64::extract(insn_at(insn_addr, 0), 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);            // bits 15..0
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff); // bits 31..16
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff); // bits 47..32
    return 3;                       // three instructions patched
  }
  // Debug-only check that the instruction sequence at insn_addr
  // really materializes `target`: decode it back and compare.
  static void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);   // disassemble the offending sequence for the log
      assert(address_is == target, "should be");
    }
#endif
  }
431 uint32_t insn2 = insn_at(insn_addr, 1);
432 uint64_t dest = uint64_t(target);
433 dest = (dest & 0xffff0000ffffffff) |
434 ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
435 target = address(dest);
436
437 // We know the destination 4k page. Maybe we have a third
438 // instruction.
439 uint32_t insn = insn_at(insn_addr, 0);
440 uint32_t insn3 = insn_at(insn_addr, 2);
441 ptrdiff_t byte_offset;
442 if (offset_for(insn, insn3, byte_offset)) {
443 target += byte_offset;
444 return 3;
445 } else {
446 return 2;
447 }
448 }
449 static int immediate(address insn_addr, address &target) {
450 uint32_t *insns = (uint32_t *)insn_addr;
451 assert(Instruction_aarch64::extract(insns[0], 31, 21) == 0b11010010100, "must be");
452 // Move wide constant: movz, movk, movk. See movptr().
453 assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
454 assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
455 target = address(uint64_t(Instruction_aarch64::extract(insns[0], 20, 5))
456 + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
457 + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
458 assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
459 assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
460 return 3;
461 }
  // Nothing to verify for the extraction direction; the patching
  // decoder's verify() re-decodes and compares instead.
  static void verify(address insn_addr, address &target) {
  }
464 };
465
// Decode the instruction sequence at insn_addr and return the target
// address it materializes, using the extraction decoder driven by
// RelocActions.
address MacroAssembler::target_addr_for_insn(address insn_addr) {
  address target;
  RelocActions<AArch64Decoder>::run(insn_addr, target);
  return target;
}
934 // Max stub size: alignment nop, TrampolineStub.
935 return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
936 }
937
void MacroAssembler::emit_static_call_stub() {
  // CompiledDirectCall::set_to_interpreted knows the
  // exact layout of this stub.

  isb();
  // Placeholder metadata; patched later with the real Method*.
  mov_metadata(rmethod, nullptr);

  // Jump to the entry point of the c2i stub.
  if (codestub_branch_needs_far_jump()) {
    // Far variant: materialize the destination (zero for now, patched
    // later) and branch through a register.
    movptr(rscratch1, 0);
    br(rscratch1);
  } else {
    // Near variant: a pc-relative branch to itself, patched later.
    b(pc());
  }
}
953
// Size in bytes of the stub emitted by emit_static_call_stub(); the
// far-jump variant is three instructions longer.
int MacroAssembler::static_call_stub_size() {
  if (!codestub_branch_needs_far_jump()) {
    // isb; movk; movz; movz; b
    return 5 * NativeInstruction::instruction_size;
  }
  // isb; movk; movz; movz; movk; movz; movz; br
  return 8 * NativeInstruction::instruction_size;
}
962
// Normalize a C-style boolean in x to exactly 0 or 1.
void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  // since C-style booleans are stored in one byte
  // only! (was bug)
  tst(x, 0xff);
  cset(x, Assembler::NE);
}
971
// Emit an inline-cache call to `entry`, recording a virtual-call
// relocation for `method_index`. rscratch2 is loaded with the
// non-oop sentinel that the callee's IC check compares against.
address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  movptr(rscratch2, (intptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}
977
978 int MacroAssembler::ic_check_size() {
3439
3440 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
3441 if (decrement.is_register()) {
3442 sub(Rd, Rn, decrement.as_register());
3443 } else {
3444 sub(Rd, Rn, decrement.as_constant());
3445 }
3446 }
3447
3448 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
3449 if (decrement.is_register()) {
3450 subw(Rd, Rn, decrement.as_register());
3451 } else {
3452 subw(Rd, Rn, decrement.as_constant());
3453 }
3454 }
3455
// Reload rheapbase with the compressed-oops base. Once the VM is
// fully initialized the base can be moved in as a constant; earlier
// in startup it is loaded indirectly from CompressedOops::base_addr()
// — NOTE(review): presumably because the base is not final yet.
void MacroAssembler::reinit_heapbase()
{
  if (UseCompressedOops) {
    if (Universe::is_fully_initialized()) {
      mov(rheapbase, CompressedOops::base());
    } else {
      lea(rheapbase, ExternalAddress(CompressedOops::base_addr()));
      ldr(rheapbase, Address(rheapbase));
    }
  }
}
3467
3468 // A generic CAS; success or failure is in the EQ flag. A weak CAS
3469 // doesn't retry and may fail spuriously. If the oldval is wanted,
3470 // Pass a register for the result, otherwise pass noreg.
3471
3472 // Clobbers rscratch1
3473 void MacroAssembler::cmpxchg(Register addr, Register expected,
3474 Register new_val,
3475 enum operand_size size,
3476 bool acquire, bool release,
3477 bool weak,
3478 Register result) {
3479 if (result == noreg) result = rscratch1;
// Load the java mirror (java.lang.Class oop) of the class holding the
// current method into dst: Method* -> ConstMethod -> ConstantPool ->
// pool-holder Klass -> mirror OopHandle, then resolve the handle.
// tmp1/tmp2 are clobbered by resolve_oop_handle.
void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  // NOTE(review): the `method` parameter is not used — the chain
  // starts from rmethod. Confirm all callers pass rmethod here.
  ldr(dst, Address(rmethod, Method::const_offset()));
  ldr(dst, Address(dst, ConstMethod::constants_offset()));
  ldr(dst, Address(dst, ConstantPool::pool_holder_offset()));
  ldr(dst, Address(dst, mirror_offset));
  resolve_oop_handle(dst, tmp1, tmp2);
}
5125
// Compare the (uncompressed) klass pointer in `klass` against the
// klass of `obj`, setting condition flags for a following branch.
// tmp is clobbered.
void MacroAssembler::cmp_klass(Register obj, Register klass, Register tmp) {
  assert_different_registers(obj, klass, tmp);
  if (UseCompressedClassPointers) {
    if (UseCompactObjectHeaders) {
      load_narrow_klass_compact(tmp, obj);
    } else {
      ldrw(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
    }
    if (CompressedKlassPointers::base() == nullptr) {
      // Zero base: decoding is just a shift, which cmp can fold in.
      cmp(klass, tmp, LSL, CompressedKlassPointers::shift());
      return;
    } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
               && CompressedKlassPointers::shift() == 0) {
      // Only the bottom 32 bits matter
      cmpw(klass, tmp);
      return;
    }
    // General case: fully decode the narrow klass before comparing.
    decode_klass_not_null(tmp);
  } else {
    ldr(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
  }
  cmp(klass, tmp);
}
5149
5150 void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) {
5151 if (UseCompactObjectHeaders) {
5152 load_narrow_klass_compact(tmp1, obj1);
5153 load_narrow_klass_compact(tmp2, obj2);
5154 cmpw(tmp1, tmp2);
5155 } else if (UseCompressedClassPointers) {
5156 ldrw(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
5157 ldrw(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes()));
5376 if (dst != src) {
    // we can load the base into dst, subtract it from the src and shift down
5378 lea(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
5379 ldr(dst, dst);
5380 sub(dst, src, dst);
5381 lsr(dst, dst, shift);
5382 } else {
5383 // we need an extra register in order to load the coop base
5384 Register tmp = pick_different_tmp(dst, src);
5385 RegSet regs = RegSet::of(tmp);
5386 push(regs, sp);
5387 lea(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
5388 ldr(tmp, tmp);
5389 sub(dst, src, tmp);
5390 lsr(dst, dst, shift);
5391 pop(regs, sp);
5392 }
5393 }
5394
5395 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
5396 if (AOTCodeCache::is_on_for_dump()) {
5397 encode_klass_not_null_for_aot(dst, src);
5398 return;
5399 }
5400
5401 switch (klass_decode_mode()) {
5402 case KlassDecodeZero:
5403 if (CompressedKlassPointers::shift() != 0) {
5404 lsr(dst, src, CompressedKlassPointers::shift());
5405 } else {
5406 if (dst != src) mov(dst, src);
5407 }
5408 break;
5409
5410 case KlassDecodeXor:
5411 if (CompressedKlassPointers::shift() != 0) {
5412 eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5413 lsr(dst, dst, CompressedKlassPointers::shift());
5414 } else {
5415 eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5416 }
5442 if (dst != src) {
5443 // we can load the base into dst then add the offset with a suitable shift
5444 lea(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
5445 ldr(dst, dst);
5446 add(dst, dst, src, LSL, shift);
5447 } else {
5448 // we need an extra register in order to load the coop base
5449 Register tmp = pick_different_tmp(dst, src);
5450 RegSet regs = RegSet::of(tmp);
5451 push(regs, sp);
5452 lea(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
5453 ldr(tmp, tmp);
5454 add(dst, tmp, src, LSL, shift);
5455 pop(regs, sp);
5456 }
5457 }
5458
5459 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
5460 assert (UseCompressedClassPointers, "should only be used for compressed headers");
5461
5462 if (AOTCodeCache::is_on_for_dump()) {
5463 decode_klass_not_null_for_aot(dst, src);
5464 return;
5465 }
5466
5467 switch (klass_decode_mode()) {
5468 case KlassDecodeZero:
5469 if (CompressedKlassPointers::shift() != 0) {
5470 lsl(dst, src, CompressedKlassPointers::shift());
5471 } else {
5472 if (dst != src) mov(dst, src);
5473 }
5474 break;
5475
5476 case KlassDecodeXor:
5477 if (CompressedKlassPointers::shift() != 0) {
5478 lsl(dst, src, CompressedKlassPointers::shift());
5479 eor(dst, dst, (uint64_t)CompressedKlassPointers::base());
5480 } else {
5481 eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5482 }
|
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "asm/assembler.hpp"
27 #include "asm/assembler.inline.hpp"
28 #include "ci/ciEnv.hpp"
29 #include "ci/ciUtilities.hpp"
30 #include "code/compiledIC.hpp"
31 #include "compiler/compileTask.hpp"
32 #include "compiler/disassembler.hpp"
33 #include "compiler/oopMap.hpp"
34 #include "gc/shared/barrierSet.hpp"
35 #include "gc/shared/barrierSetAssembler.hpp"
36 #include "gc/shared/cardTableBarrierSet.hpp"
37 #include "gc/shared/cardTable.hpp"
38 #include "gc/shared/collectedHeap.hpp"
39 #include "gc/shared/tlab_globals.hpp"
40 #include "interpreter/bytecodeHistogram.hpp"
41 #include "interpreter/interpreter.hpp"
42 #include "interpreter/interpreterRuntime.hpp"
43 #include "jvm.h"
44 #include "memory/resourceArea.hpp"
45 #include "memory/universe.hpp"
46 #include "nativeInst_aarch64.hpp"
47 #include "oops/accessDecorators.hpp"
48 #include "oops/compressedKlass.inline.hpp"
49 #include "oops/compressedOops.inline.hpp"
305 uint32_t insn2 = insn_at(insn_addr, 1);
306 uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
307 Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
308 guarantee(((dest >> size) << size) == dest, "misaligned target");
309 return 2;
310 }
  // Patch the add that follows an adrp: fill the low 12 bits of the
  // target address into the add's imm12 field (bits 21..10 of the
  // second instruction). The adrp itself is patched elsewhere.
  static int adrpAdd(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;   // page offset: low 12 bits of the target
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;                       // sequence is two instructions long
  }
  // Patch an adrp; movk pair: the movk (second instruction) carries
  // bits 47..32 of the target in its imm16 field (bits 20..5).
  // `target` is then rewritten to the address the pair actually
  // yields: the low 32 bits of the requested target combined with
  // bits 47..32 of the instruction's own address.
  // NOTE(review): presumably the high bits must match insn_addr's
  // because adrp has +-4GB reach — confirm against the emitter.
  static int adrpMovk(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  // Patch a move-wide constant holding a metadata pointer.
  // Metadata pointers are either narrow (32 bits) or wide (48 bits).
  // We encode narrow ones by setting the upper 16 bits in the first
  // instruction.
  static int immediate(address insn_addr, address &target) {
    // Narrow case: movz with hw == 1 (0b11010010101), followed by one
    // movk. Re-encode the Klass* and patch the two 16-bit halves.
    if (Instruction_aarch64::extract(insn_at(insn_addr, 0), 31, 21) == 0b11010010101) {
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      narrowKlass nk = CompressedKlassPointers::encode((Klass*)target);
      Instruction_aarch64::patch(insn_addr, 20, 5, nk >> 16);     // bits 31..16
      Instruction_aarch64::patch(insn_addr+4, 20, 5, nk & 0xffff); // bits 15..0
      return 2;
    }
    // Wide case: movz (hw == 0); movk; movk — 16 bits per instruction.
    assert(Instruction_aarch64::extract(insn_at(insn_addr, 0), 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);            // bits 15..0
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff); // bits 31..16
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff); // bits 47..32
    return 3;
  }
  // Debug-only check that the instruction sequence at insn_addr
  // really materializes `target`: decode it back and compare.
  static void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);   // disassemble the offending sequence for the log
      assert(address_is == target, "should be");
    }
#endif
  }
442 uint32_t insn2 = insn_at(insn_addr, 1);
443 uint64_t dest = uint64_t(target);
444 dest = (dest & 0xffff0000ffffffff) |
445 ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
446 target = address(dest);
447
448 // We know the destination 4k page. Maybe we have a third
449 // instruction.
450 uint32_t insn = insn_at(insn_addr, 0);
451 uint32_t insn3 = insn_at(insn_addr, 2);
452 ptrdiff_t byte_offset;
453 if (offset_for(insn, insn3, byte_offset)) {
454 target += byte_offset;
455 return 3;
456 } else {
457 return 2;
458 }
459 }
460 static int immediate(address insn_addr, address &target) {
461 uint32_t *insns = (uint32_t *)insn_addr;
462 // Metadata pointers are either narrow (32 bits) or wide (48 bits).
463 // We encode narrow ones by setting the upper 16 bits in the first
464 // instruction.
465 if (Instruction_aarch64::extract(insns[0], 31, 21) == 0b11010010101) {
466 assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
467 narrowKlass nk = (narrowKlass)((uint32_t(Instruction_aarch64::extract(insns[0], 20, 5)) << 16)
468 + uint32_t(Instruction_aarch64::extract(insns[1], 20, 5)));
469 target = (address)CompressedKlassPointers::decode(nk);
470 return 2;
471 }
472 assert(Instruction_aarch64::extract(insns[0], 31, 21) == 0b11010010100, "must be");
473 // Move wide constant: movz, movk, movk. See movptr().
474 assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
475 assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
476 target = address(uint64_t(Instruction_aarch64::extract(insns[0], 20, 5))
477 + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
478 + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
479 assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
480 assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
481 return 3;
482 }
  // Nothing to verify for the extraction direction; the patching
  // decoder's verify() re-decodes and compares instead.
  static void verify(address insn_addr, address &target) {
  }
485 };
486
// Decode the instruction sequence at insn_addr and return the target
// address it materializes, using the extraction decoder driven by
// RelocActions.
address MacroAssembler::target_addr_for_insn(address insn_addr) {
  address target;
  RelocActions<AArch64Decoder>::run(insn_addr, target);
  return target;
}
955 // Max stub size: alignment nop, TrampolineStub.
956 return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
957 }
958
void MacroAssembler::emit_static_call_stub() {
  // CompiledDirectCall::set_to_interpreted knows the
  // exact layout of this stub.

  isb();
  // Placeholder metadata; patched later with the real Method*.
  mov_metadata(rmethod, nullptr);

  // Jump to the entry point of the c2i stub.
  if (codestub_branch_needs_far_jump()) {
    // Far variant: materialize the destination (zero for now, patched
    // later) and branch through a register.
    movptr(rscratch1, 0);
    br(rscratch1);
  } else {
    // Near variant: a pc-relative branch to itself, patched later.
    b(pc());
  }
}
974
// Upper bound on the size in bytes of the stub emitted by
// emit_static_call_stub(): the far-jump variant, eight instructions.
int MacroAssembler::max_static_call_stub_size() {
  // isb; movk; movz; movz; movk; movz; movz; br
  return 8 * NativeInstruction::instruction_size;
}
979
// Normalize a C-style boolean in x to exactly 0 or 1.
void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  // since C-style booleans are stored in one byte
  // only! (was bug)
  tst(x, 0xff);
  cset(x, Assembler::NE);
}
988
// Emit an inline-cache call to `entry`, recording a virtual-call
// relocation for `method_index`. rscratch2 is loaded with the
// non-oop sentinel that the callee's IC check compares against.
address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  movptr(rscratch2, (intptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}
994
995 int MacroAssembler::ic_check_size() {
3456
3457 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
3458 if (decrement.is_register()) {
3459 sub(Rd, Rn, decrement.as_register());
3460 } else {
3461 sub(Rd, Rn, decrement.as_constant());
3462 }
3463 }
3464
3465 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
3466 if (decrement.is_register()) {
3467 subw(Rd, Rn, decrement.as_register());
3468 } else {
3469 subw(Rd, Rn, decrement.as_constant());
3470 }
3471 }
3472
// Reload rheapbase with the compressed-oops base. Once the VM is
// fully initialized — and we are not generating AOT code — the base
// can be moved in as a constant; otherwise it is loaded indirectly
// from CompressedOops::base_addr(). NOTE(review): the indirect load
// under AOT dump presumably keeps the emitted code relocatable across
// runs with a different base — confirm.
void MacroAssembler::reinit_heapbase()
{
  if (UseCompressedOops) {
    if (Universe::is_fully_initialized() && !AOTCodeCache::is_on_for_dump()) {
      mov(rheapbase, CompressedOops::base());
    } else {
      lea(rheapbase, ExternalAddress(CompressedOops::base_addr()));
      ldr(rheapbase, Address(rheapbase));
    }
  }
}
3484
3485 // A generic CAS; success or failure is in the EQ flag. A weak CAS
3486 // doesn't retry and may fail spuriously. If the oldval is wanted,
3487 // Pass a register for the result, otherwise pass noreg.
3488
3489 // Clobbers rscratch1
3490 void MacroAssembler::cmpxchg(Register addr, Register expected,
3491 Register new_val,
3492 enum operand_size size,
3493 bool acquire, bool release,
3494 bool weak,
3495 Register result) {
3496 if (result == noreg) result = rscratch1;
// Load the java mirror (java.lang.Class oop) of the class holding the
// current method into dst: Method* -> ConstMethod -> ConstantPool ->
// pool-holder Klass -> mirror OopHandle, then resolve the handle.
// tmp1/tmp2 are clobbered by resolve_oop_handle.
void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  // NOTE(review): the `method` parameter is not used — the chain
  // starts from rmethod. Confirm all callers pass rmethod here.
  ldr(dst, Address(rmethod, Method::const_offset()));
  ldr(dst, Address(dst, ConstMethod::constants_offset()));
  ldr(dst, Address(dst, ConstantPool::pool_holder_offset()));
  ldr(dst, Address(dst, mirror_offset));
  resolve_oop_handle(dst, tmp1, tmp2);
}
5142
// Compare the (uncompressed) klass pointer in `klass` against the
// klass of `obj`, setting condition flags for a following branch.
// tmp is clobbered.
void MacroAssembler::cmp_klass(Register obj, Register klass, Register tmp) {
  assert_different_registers(obj, klass, tmp);
  if (UseCompressedClassPointers) {
    if (UseCompactObjectHeaders) {
      load_narrow_klass_compact(tmp, obj);
    } else {
      ldrw(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
    }
    if (CompressedKlassPointers::base() == nullptr) {
      // Zero base: decoding is just a shift, which cmp can fold in.
      cmp(klass, tmp, LSL, CompressedKlassPointers::shift());
      return;
    } else if (!AOTCodeCache::is_on_for_dump() &&
               // The 32-bit shortcut bakes in the current base, so it
               // is skipped when dumping AOT code.
               ((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
               && CompressedKlassPointers::shift() == 0) {
      // Only the bottom 32 bits matter
      cmpw(klass, tmp);
      return;
    }
    // General case: fully decode the narrow klass before comparing.
    decode_klass_not_null(tmp);
  } else {
    ldr(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
  }
  cmp(klass, tmp);
}
5167
5168 void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) {
5169 if (UseCompactObjectHeaders) {
5170 load_narrow_klass_compact(tmp1, obj1);
5171 load_narrow_klass_compact(tmp2, obj2);
5172 cmpw(tmp1, tmp2);
5173 } else if (UseCompressedClassPointers) {
5174 ldrw(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
5175 ldrw(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes()));
5394 if (dst != src) {
    // we can load the base into dst, subtract it from the src and shift down
5396 lea(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
5397 ldr(dst, dst);
5398 sub(dst, src, dst);
5399 lsr(dst, dst, shift);
5400 } else {
5401 // we need an extra register in order to load the coop base
5402 Register tmp = pick_different_tmp(dst, src);
5403 RegSet regs = RegSet::of(tmp);
5404 push(regs, sp);
5405 lea(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
5406 ldr(tmp, tmp);
5407 sub(dst, src, tmp);
5408 lsr(dst, dst, shift);
5409 pop(regs, sp);
5410 }
5411 }
5412
5413 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
5414 if (CompressedKlassPointers::base() != nullptr && AOTCodeCache::is_on_for_dump()) {
5415 encode_klass_not_null_for_aot(dst, src);
5416 return;
5417 }
5418
5419 switch (klass_decode_mode()) {
5420 case KlassDecodeZero:
5421 if (CompressedKlassPointers::shift() != 0) {
5422 lsr(dst, src, CompressedKlassPointers::shift());
5423 } else {
5424 if (dst != src) mov(dst, src);
5425 }
5426 break;
5427
5428 case KlassDecodeXor:
5429 if (CompressedKlassPointers::shift() != 0) {
5430 eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5431 lsr(dst, dst, CompressedKlassPointers::shift());
5432 } else {
5433 eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5434 }
5460 if (dst != src) {
5461 // we can load the base into dst then add the offset with a suitable shift
5462 lea(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
5463 ldr(dst, dst);
5464 add(dst, dst, src, LSL, shift);
5465 } else {
5466 // we need an extra register in order to load the coop base
5467 Register tmp = pick_different_tmp(dst, src);
5468 RegSet regs = RegSet::of(tmp);
5469 push(regs, sp);
5470 lea(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
5471 ldr(tmp, tmp);
5472 add(dst, tmp, src, LSL, shift);
5473 pop(regs, sp);
5474 }
5475 }
5476
5477 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
5478 assert (UseCompressedClassPointers, "should only be used for compressed headers");
5479
5480 if (CompressedKlassPointers::base() != nullptr && AOTCodeCache::is_on_for_dump()) {
5481 decode_klass_not_null_for_aot(dst, src);
5482 return;
5483 }
5484
5485 switch (klass_decode_mode()) {
5486 case KlassDecodeZero:
5487 if (CompressedKlassPointers::shift() != 0) {
5488 lsl(dst, src, CompressedKlassPointers::shift());
5489 } else {
5490 if (dst != src) mov(dst, src);
5491 }
5492 break;
5493
5494 case KlassDecodeXor:
5495 if (CompressedKlassPointers::shift() != 0) {
5496 lsl(dst, src, CompressedKlassPointers::shift());
5497 eor(dst, dst, (uint64_t)CompressedKlassPointers::base());
5498 } else {
5499 eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5500 }
|