 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "ci/ciUtilities.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
// ...

    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
  static int adrpAdd(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  static int adrpMovk(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
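  // MOVZ/MOVK carry their 16-bit immediate in bits 20:5, hence the
  // patch(..., 20, 5, ...) calls below. Bits 31:21 encode sf/opc/hw:
  // 0b11010010100 is MOVZ Xd, #imm16 (hw == 0) and 0b11010010101 is
  // MOVZ Xd, #imm16, LSL #16 (hw == 1), which is how the wide and
  // narrow forms are told apart.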
  static int immediate(address insn_addr, address &target) {
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(insn_at(insn_addr, 0), 31, 21) == 0b11010010101) {
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      narrowKlass nk = CompressedKlassPointers::encode((Klass*)target);
      Instruction_aarch64::patch(insn_addr, 20, 5, nk >> 16);
      Instruction_aarch64::patch(insn_addr+4, 20, 5, nk & 0xffff);
      return 2;
    }
    assert(Instruction_aarch64::extract(insn_at(insn_addr, 0), 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
  static void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }
// ...

    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
      ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    } else {
      return 2;
    }
  }
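  // The inverse of the patching case above: read the imm16 fields back
  // out of the movz/movk sequence and reassemble the constant.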
  static int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(insns[0], 31, 21) == 0b11010010101) {
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      narrowKlass nk = (narrowKlass)((uint32_t(Instruction_aarch64::extract(insns[0], 20, 5)) << 16)
                                     + uint32_t(Instruction_aarch64::extract(insns[1], 20, 5)));
      target = (address)CompressedKlassPointers::decode(nk);
      return 2;
    }
    assert(Instruction_aarch64::extract(insns[0], 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(insns[0], 20, 5))
                     + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                     + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    return 3;
  }
  static void verify(address insn_addr, address &target) {
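    // The decoder only reads instructions; there is nothing to check here.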
  }
};

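// Given the address of the first instruction of a patchable sequence
// (adrp-based or movz/movk/movk), return the destination address that
// the sequence currently encodes.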
address MacroAssembler::target_addr_for_insn(address insn_addr) {
  address target;
  RelocActions<AArch64Decoder>::run(insn_addr, target);
  return target;
}
// ...

  // Max stub size: alignment nop, TrampolineStub.
  return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
}

void MacroAssembler::emit_static_call_stub() {
  // CompiledDirectCall::set_to_interpreted knows the
  // exact layout of this stub.

  isb();
  mov_metadata(rmethod, nullptr);

  // Jump to the entry point of the c2i stub.
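  // The target emitted here is only a placeholder (a zero constant or a
  // branch-to-self); CompiledDirectCall::set_to_interpreted patches in
  // the real Method* and the c2i entry point later.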
  if (codestub_branch_needs_far_jump()) {
    movptr(rscratch1, 0);
    br(rscratch1);
  } else {
    b(pc());
  }
}

int MacroAssembler::max_static_call_stub_size() {
  // isb; movz; movk; movk; movz; movk; movk; br
  return 8 * NativeInstruction::instruction_size;
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at the least-significant byte of x,
  // since C-style booleans are stored in one byte only! (was bug)
  tst(x, 0xff);
  cset(x, Assembler::NE);
}

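// Inline-cache call: rscratch2 carries the speculated klass. It starts
// out as Universe::non_oop_word(), a value no real klass can equal, so
// the callee's ic_check always misses until the cache is populated.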
address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  movptr(rscratch2, (intptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}

int MacroAssembler::ic_check_size() {
// ...

void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
  if (decrement.is_register()) {
    sub(Rd, Rn, decrement.as_register());
  } else {
    sub(Rd, Rn, decrement.as_constant());
  }
}

void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
  if (decrement.is_register()) {
    subw(Rd, Rn, decrement.as_register());
  } else {
    subw(Rd, Rn, decrement.as_constant());
  }
}

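// Reload rheapbase with the compressed-oops base. Once the universe is
// fully initialized the base is a constant, but while bootstrapping (or
// when dumping AOT code, where the value must not be baked in) it is
// loaded indirectly from CompressedOops::base_addr().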
void MacroAssembler::reinit_heapbase()
{
  if (UseCompressedOops) {
    if (Universe::is_fully_initialized() && !AOTCodeCache::is_on_for_dump()) {
      mov(rheapbase, CompressedOops::base());
    } else {
      lea(rheapbase, ExternalAddress(CompressedOops::base_addr()));
      ldr(rheapbase, Address(rheapbase));
    }
  }
}

// A generic CAS; success or failure is in the EQ flag. A weak CAS
// doesn't retry and may fail spuriously. If the oldval is wanted,
// pass a register for the result, otherwise pass noreg.

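// For example, a strong, sequentially consistent 64-bit CAS that
// discards the old value could look like (sketch; 'done' is a caller label):
//   cmpxchg(addr, expected, new_val, Assembler::xword,
//           /*acquire*/ true, /*release*/ true, /*weak*/ false, noreg);
//   br(Assembler::EQ, done);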
// Clobbers rscratch1
void MacroAssembler::cmpxchg(Register addr, Register expected,
                             Register new_val,
                             enum operand_size size,
                             bool acquire, bool release,
                             bool weak,
                             Register result) {
  if (result == noreg)  result = rscratch1;
// ...

void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  ldr(dst, Address(method, Method::const_offset()));
  ldr(dst, Address(dst, ConstMethod::constants_offset()));
  ldr(dst, Address(dst, ConstantPool::pool_holder_offset()));
  ldr(dst, Address(dst, mirror_offset));
  resolve_oop_handle(dst, tmp1, tmp2);
}

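// Compare the given Klass* against the klass of obj, staying in
// compressed form where the encoding allows it so that the common case
// is a single narrow load and compare.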
void MacroAssembler::cmp_klass(Register obj, Register klass, Register tmp) {
  assert_different_registers(obj, klass, tmp);
  if (UseCompressedClassPointers) {
    if (UseCompactObjectHeaders) {
      load_narrow_klass_compact(tmp, obj);
    } else {
      ldrw(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
    }
    if (CompressedKlassPointers::base() == nullptr) {
      cmp(klass, tmp, LSL, CompressedKlassPointers::shift());
      return;
    } else if (!AOTCodeCache::is_on_for_dump() &&
               ((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0 &&
               CompressedKlassPointers::shift() == 0) {
      // Only the bottom 32 bits matter
      cmpw(klass, tmp);
      return;
    }
    decode_klass_not_null(tmp);
  } else {
    ldr(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
  }
  cmp(klass, tmp);
}

void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) {
  if (UseCompactObjectHeaders) {
    load_narrow_klass_compact(tmp1, obj1);
    load_narrow_klass_compact(tmp2, obj2);
    cmpw(tmp1, tmp2);
  } else if (UseCompressedClassPointers) {
    ldrw(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
    ldrw(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes()));
// ...

  if (dst != src) {
    // we can load the base into dst, subtract it from the src and shift down
    lea(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
    ldr(dst, dst);
    sub(dst, src, dst);
    lsr(dst, dst, shift);
  } else {
    // we need an extra register in order to load the coop base
    Register tmp = pick_different_tmp(dst, src);
    RegSet regs = RegSet::of(tmp);
    push(regs, sp);
    lea(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
    ldr(tmp, tmp);
    sub(dst, src, tmp);
    lsr(dst, dst, shift);
    pop(regs, sp);
  }
}

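// klass_decode_mode() picks the cheapest scheme for the current
// base/shift: KlassDecodeZero (no base, shift only), KlassDecodeXor
// (the base is a logical immediate whose bits don't overlap the shifted
// klass range, so a single EOR folds it in or out), or, roughly, when the
// shifted base only has bits set in 47:32, KlassDecodeMovk (a single MOVK
// inserts or removes it).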
void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
  if (CompressedKlassPointers::base() != nullptr && AOTCodeCache::is_on_for_dump()) {
    encode_klass_not_null_for_aot(dst, src);
    return;
  }

  switch (klass_decode_mode()) {
  case KlassDecodeZero:
    if (CompressedKlassPointers::shift() != 0) {
      lsr(dst, src, CompressedKlassPointers::shift());
    } else {
      if (dst != src) mov(dst, src);
    }
    break;

  case KlassDecodeXor:
    if (CompressedKlassPointers::shift() != 0) {
      eor(dst, src, (uint64_t)CompressedKlassPointers::base());
      lsr(dst, dst, CompressedKlassPointers::shift());
    } else {
      eor(dst, src, (uint64_t)CompressedKlassPointers::base());
    }
// ...

  if (dst != src) {
    // we can load the base into dst then add the offset with a suitable shift
    lea(dst, ExternalAddress(CompressedKlassPointers::base_addr()));
    ldr(dst, dst);
    add(dst, dst, src, LSL, shift);
  } else {
    // we need an extra register in order to load the coop base
    Register tmp = pick_different_tmp(dst, src);
    RegSet regs = RegSet::of(tmp);
    push(regs, sp);
    lea(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
    ldr(tmp, tmp);
    add(dst, tmp, src, LSL, shift);
    pop(regs, sp);
  }
}

void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
  assert(UseCompressedClassPointers, "should only be used for compressed headers");

  if (CompressedKlassPointers::base() != nullptr && AOTCodeCache::is_on_for_dump()) {
    decode_klass_not_null_for_aot(dst, src);
    return;
  }

  switch (klass_decode_mode()) {
  case KlassDecodeZero:
    if (CompressedKlassPointers::shift() != 0) {
      lsl(dst, src, CompressedKlassPointers::shift());
    } else {
      if (dst != src) mov(dst, src);
    }
    break;

  case KlassDecodeXor:
    if (CompressedKlassPointers::shift() != 0) {
      lsl(dst, src, CompressedKlassPointers::shift());
      eor(dst, dst, (uint64_t)CompressedKlassPointers::base());
    } else {
      eor(dst, src, (uint64_t)CompressedKlassPointers::base());
    }
// ...

  assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address");

  InstructionMark im(this);
  code_section()->relocate(inst_mark(), dest.rspec());
  // 8143067: Ensure that the adrp can reach the dest from anywhere within
  // the code cache, so that if it is relocated we know it will still reach
  // the target.
  if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
    _adrp(reg1, dest.target());
  } else {
    uint64_t target = (uint64_t)dest.target();
    uint64_t adrp_target
      = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);
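    // ADRP reaches +/-4GB via a signed 21-bit page offset. adrp_target
    // keeps the low 32 bits of the real target but borrows bits 47:32 from
    // the current pc, so the ADRP itself is always in range; the MOVK below
    // then overwrites bits 47:32 with the real value.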

    _adrp(reg1, (address)adrp_target);
    movk(reg1, target >> 32, 32);
  }
  byte_offset = (uint64_t)dest.target() & 0xfff;
}

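// Materialize the card-table byte-map base. When dumping AOT code the
// base is process-specific, so it is loaded indirectly through the
// AOTRuntimeConstants data area rather than baked in as a constant.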
void MacroAssembler::load_byte_map_base(Register reg) {
#if INCLUDE_CDS
  if (AOTCodeCache::is_on_for_dump()) {
    address byte_map_base_adr = AOTRuntimeConstants::card_table_address();
    lea(reg, ExternalAddress(byte_map_base_adr));
    ldr(reg, Address(reg));
    return;
  }
#endif
  CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();

  // Strictly speaking the card table base isn't an address at all, and it
  // might even be negative. It is thus materialised as a constant.
  mov(reg, (uint64_t)ctbs->card_table_base_const());
}

void MacroAssembler::load_aotrc_address(Register reg, address a) {
#if INCLUDE_CDS
  assert(AOTRuntimeConstants::contains(a), "address out of range for data area");
  if (AOTCodeCache::is_on_for_dump()) {
    // all aotrc field addresses should be registered in the AOTCodeCache address table
    lea(reg, ExternalAddress(a));
  } else {
    mov(reg, (uint64_t)a);
  }
#else
  ShouldNotReachHere();
#endif
}

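// The frame size thresholds below track AArch64 immediate ranges: STP's
// scaled signed offset reaches about 512 bytes (1 << 9), SUB's unsigned
// 12-bit immediate reaches 4KB (1 << 12), and anything larger has to go
// through rscratch1.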
void MacroAssembler::build_frame(int framesize) {
  assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
  assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
  protect_return_address();
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    sub(sp, sp, framesize);
    stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
  } else {
    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
    if (PreserveFramePointer) mov(rfp, sp);
    if (framesize < ((1 << 12) + 2 * wordSize)) {
      sub(sp, sp, framesize - 2 * wordSize);
    } else {
      mov(rscratch1, framesize - 2 * wordSize);
      sub(sp, sp, rscratch1);
    }
  }
  verify_cross_modify_fence_not_required();
}