10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/assembler.hpp"
28 #include "asm/assembler.inline.hpp"
29 #include "ci/ciEnv.hpp"
30 #include "code/compiledIC.hpp"
31 #include "compiler/compileTask.hpp"
32 #include "compiler/disassembler.hpp"
33 #include "compiler/oopMap.hpp"
34 #include "gc/shared/barrierSet.hpp"
35 #include "gc/shared/barrierSetAssembler.hpp"
36 #include "gc/shared/cardTableBarrierSet.hpp"
37 #include "gc/shared/cardTable.hpp"
38 #include "gc/shared/collectedHeap.hpp"
39 #include "gc/shared/tlab_globals.hpp"
40 #include "interpreter/bytecodeHistogram.hpp"
41 #include "interpreter/interpreter.hpp"
42 #include "jvm.h"
43 #include "memory/resourceArea.hpp"
44 #include "memory/universe.hpp"
45 #include "nativeInst_aarch64.hpp"
46 #include "oops/accessDecorators.hpp"
47 #include "oops/compressedKlass.inline.hpp"
48 #include "oops/compressedOops.inline.hpp"
49 #include "oops/klass.inline.hpp"
331 uint32_t insn2 = insn_at(insn_addr, 1);
332 uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
333 Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
334 guarantee(((dest >> size) << size) == dest, "misaligned target");
335 return 2;
336 }
337 static int adrpAdd_impl(address insn_addr, address &target) {
338 uintptr_t dest = (uintptr_t)target;
339 int offset_lo = dest & 0xfff;
340 Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
341 return 2;
342 }
  // adrp/movk pair: the movk supplies bits 47:32 of the absolute target.
  // Patch those bits into the movk's immediate field (bits 20:5 of the
  // instruction following the adrp), then rewrite `target` so its upper
  // bits are taken from insn_addr — presumably so the caller's subsequent
  // adrp patch sees an address within adrp's +/-4G reach of the insn
  // (NOTE(review): confirm against the matching decode path).
  // Returns the number of instructions involved in the patch (2).
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  // Patch a movz/movk/movk sequence that materializes a 48-bit constant,
  // 16 bits per instruction (low to high).  The leading instruction must
  // be a movz with hw==00 (bits 31:21 == 0b11010010100); the two following
  // instructions must be movk.  Returns the number of instructions patched.
  virtual int immediate(address insn_addr, address &target) {
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    // Each patch writes one 16-bit chunk into the imm16 field (bits 20:5).
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
  // Debug-only post-check: re-decode the just-patched instruction(s) and
  // verify they now materialize `target`.  On mismatch, print the decoded
  // vs. expected addresses, dump the disassembly, and assert.
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }
461 uint32_t insn2 = insn_at(insn_addr, 1);
462 uint64_t dest = uint64_t(target);
463 dest = (dest & 0xffff0000ffffffff) |
464 ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
465 target = address(dest);
466
467 // We know the destination 4k page. Maybe we have a third
468 // instruction.
469 uint32_t insn = insn_at(insn_addr, 0);
470 uint32_t insn3 = insn_at(insn_addr, 2);
471 ptrdiff_t byte_offset;
472 if (offset_for(insn, insn3, byte_offset)) {
473 target += byte_offset;
474 return 3;
475 } else {
476 return 2;
477 }
478 }
479 virtual int immediate(address insn_addr, address &target) {
480 uint32_t *insns = (uint32_t *)insn_addr;
481 assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
482 // Move wide constant: movz, movk, movk. See movptr().
483 assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
484 assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
485 target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
486 + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
487 + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
488 assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
489 assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
490 return 3;
491 }
  // Decoding has no post-conditions to check; intentionally empty.
  virtual void verify(address insn_addr, address &target) {
  }
494 };
495
496 address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
497 AArch64Decoder decoder(insn_addr, insn);
498 address target;
499 decoder.run(insn_addr, target);
500 return target;
658 JavaThread::frame_anchor_offset()
659 + JavaFrameAnchor::last_Java_pc_offset()));
660
661 set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
662 }
663
// Record the last Java frame, using label L as the last_Java_pc.
// If L is already bound its address is used directly; otherwise the
// current pc is emitted as a placeholder and a patch site is registered
// so the real address is filled in when L is eventually bound.
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}
676
677 static inline bool target_needs_far_branch(address addr) {
678 // codecache size <= 128M
679 if (!MacroAssembler::far_branches()) {
680 return false;
681 }
682 // codecache size > 240M
683 if (MacroAssembler::codestub_branch_needs_far_jump()) {
684 return true;
685 }
686 // codecache size: 128M..240M
687 return !CodeCache::is_non_nmethod(addr);
688 }
689
690 void MacroAssembler::far_call(Address entry, Register tmp) {
691 assert(ReservedCodeCacheSize < 4*G, "branch out of range");
692 assert(CodeCache::find_blob(entry.target()) != nullptr,
693 "destination of far call not found in code cache");
694 assert(entry.rspec().type() == relocInfo::external_word_type
695 || entry.rspec().type() == relocInfo::runtime_call_type
696 || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
697 if (target_needs_far_branch(entry.target())) {
2994
2995 #ifdef ASSERT
2996 {
2997 STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10);
2998 Label valid_global_tag;
2999 tbnz(value, 1, valid_global_tag); // Test for global tag
3000 stop("non global jobject using resolve_global_jobject");
3001 bind(valid_global_tag);
3002 }
3003 #endif
3004
3005 // Resolve global handle
3006 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
3007 verify_oop(value);
3008
3009 bind(done);
3010 }
3011
// Halt execution with a message: emit a dcps1 trap (code 0xdeae) followed
// inline by the 64-bit message pointer — presumably read back out of the
// instruction stream by the trap handler (NOTE(review): confirm against
// the platform signal-handler code).
void MacroAssembler::stop(const char* msg) {
  BLOCK_COMMENT(msg);
  dcps1(0xdeae);
  emit_int64((uintptr_t)msg);
}
3017
3018 void MacroAssembler::unimplemented(const char* what) {
3019 const char* buf = nullptr;
3020 {
3021 ResourceMark rm;
3022 stringStream ss;
3023 ss.print("unimplemented: %s", what);
3024 buf = code_string(ss.as_string());
3025 }
3026 stop(buf);
3027 }
3028
3029 void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) {
3030 #ifdef ASSERT
3031 Label OK;
3032 br(cc, OK);
3033 stop(msg);
3034 bind(OK);
3035 #endif
3093
3094 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
3095 if (decrement.is_register()) {
3096 sub(Rd, Rn, decrement.as_register());
3097 } else {
3098 sub(Rd, Rn, decrement.as_constant());
3099 }
3100 }
3101
3102 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
3103 if (decrement.is_register()) {
3104 subw(Rd, Rn, decrement.as_register());
3105 } else {
3106 subw(Rd, Rn, decrement.as_constant());
3107 }
3108 }
3109
// Reload rheapbase with the compressed-oops base.  Once the universe is
// fully initialized the base is a fixed value and is moved in as an
// immediate; during startup it is re-read indirectly through
// ptrs_base_addr, since the value may not be final yet.  No-op unless
// compressed oops are in use.
void MacroAssembler::reinit_heapbase()
{
  if (UseCompressedOops) {
    if (Universe::is_fully_initialized()) {
      mov(rheapbase, CompressedOops::ptrs_base());
    } else {
      lea(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr()));
      ldr(rheapbase, Address(rheapbase));
    }
  }
}
3121
3122 // this simulates the behaviour of the x86 cmpxchg instruction using a
3123 // load linked/store conditional pair. we use the acquire/release
3124 // versions of these instructions so that we flush pending writes as
3125 // per Java semantics.
3126
3127 // n.b the x86 version assumes the old value to be compared against is
3128 // in rax and updates rax with the value located in memory if the
3129 // cmpxchg fails. we supply a register for the old value explicitly
3130
3131 // the aarch64 load linked/store conditional instructions do not
3132 // accept an offset. so, unlike x86, we must provide a plain register
3133 // to identify the memory word to be compared/exchanged rather than a
5396 // the code cache so that if it is relocated we know it will still reach
5397 if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
5398 _adrp(reg1, dest.target());
5399 } else {
5400 uint64_t target = (uint64_t)dest.target();
5401 uint64_t adrp_target
5402 = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);
5403
5404 _adrp(reg1, (address)adrp_target);
5405 movk(reg1, target >> 32, 32);
5406 }
5407 byte_offset = (uint64_t)dest.target() & 0xfff;
5408 }
5409
// Materialize the card table's byte_map_base in `reg`.
void MacroAssembler::load_byte_map_base(Register reg) {
  CardTable::CardValue* byte_map_base =
    ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();

  // Strictly speaking the byte_map_base isn't an address at all, and it might
  // even be negative. It is thus materialised as a constant.
  mov(reg, (uint64_t)byte_map_base);
}
5418
5419 void MacroAssembler::build_frame(int framesize) {
5420 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
5421 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
5422 protect_return_address();
5423 if (framesize < ((1 << 9) + 2 * wordSize)) {
5424 sub(sp, sp, framesize);
5425 stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
5426 if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
5427 } else {
5428 stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
5429 if (PreserveFramePointer) mov(rfp, sp);
5430 if (framesize < ((1 << 12) + 2 * wordSize))
5431 sub(sp, sp, framesize - 2 * wordSize);
5432 else {
5433 mov(rscratch1, framesize - 2 * wordSize);
5434 sub(sp, sp, rscratch1);
5435 }
5436 }
|
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/assembler.hpp"
28 #include "asm/assembler.inline.hpp"
29 #include "ci/ciEnv.hpp"
30 #include "ci/ciUtilities.hpp"
31 #include "code/SCCache.hpp"
32 #include "code/compiledIC.hpp"
33 #include "compiler/compileTask.hpp"
34 #include "compiler/disassembler.hpp"
35 #include "compiler/oopMap.hpp"
36 #include "gc/shared/barrierSet.hpp"
37 #include "gc/shared/barrierSetAssembler.hpp"
38 #include "gc/shared/cardTableBarrierSet.hpp"
39 #include "gc/shared/cardTable.hpp"
40 #include "gc/shared/collectedHeap.hpp"
41 #include "gc/shared/tlab_globals.hpp"
42 #include "interpreter/bytecodeHistogram.hpp"
43 #include "interpreter/interpreter.hpp"
44 #include "jvm.h"
45 #include "memory/resourceArea.hpp"
46 #include "memory/universe.hpp"
47 #include "nativeInst_aarch64.hpp"
48 #include "oops/accessDecorators.hpp"
49 #include "oops/compressedKlass.inline.hpp"
50 #include "oops/compressedOops.inline.hpp"
51 #include "oops/klass.inline.hpp"
333 uint32_t insn2 = insn_at(insn_addr, 1);
334 uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
335 Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
336 guarantee(((dest >> size) << size) == dest, "misaligned target");
337 return 2;
338 }
339 static int adrpAdd_impl(address insn_addr, address &target) {
340 uintptr_t dest = (uintptr_t)target;
341 int offset_lo = dest & 0xfff;
342 Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
343 return 2;
344 }
  // adrp/movk pair: the movk supplies bits 47:32 of the absolute target.
  // Patch those bits into the movk's immediate field (bits 20:5 of the
  // instruction following the adrp), then rewrite `target` so its upper
  // bits are taken from insn_addr — presumably so the caller's subsequent
  // adrp patch sees an address within adrp's +/-4G reach of the insn
  // (NOTE(review): confirm against the matching decode path).
  // Returns the number of instructions involved in the patch (2).
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  // Patch an instruction sequence that materializes a 64-bit immediate.
  // Two shapes are handled:
  //  - narrow klass: movz(..., lsl #16)/movk — hw==01, i.e. bits 31:21 ==
  //    0b11010010101 — carrying a 32-bit narrowKlass (2 instructions);
  //  - wide constant: movz/movk/movk carrying 48 bits (3 instructions).
  // Returns the number of instructions patched.
  virtual int immediate(address insn_addr, address &target) {
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      // Re-encode the Klass* and split the 32-bit narrowKlass across the
      // movz (high half) / movk (low half) pair.
      narrowKlass nk = CompressedKlassPointers::encode((Klass*)target);
      Instruction_aarch64::patch(insn_addr, 20, 5, nk >> 16);
      Instruction_aarch64::patch(insn_addr+4, 20, 5, nk & 0xffff);
      return 2;
    }
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    // Each patch writes one 16-bit chunk into the imm16 field (bits 20:5).
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
  // Debug-only post-check: re-decode the just-patched instruction(s) and
  // verify they now materialize `target`.  On mismatch, print the decoded
  // vs. expected addresses, dump the disassembly, and assert.
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }
473 uint32_t insn2 = insn_at(insn_addr, 1);
474 uint64_t dest = uint64_t(target);
475 dest = (dest & 0xffff0000ffffffff) |
476 ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
477 target = address(dest);
478
479 // We know the destination 4k page. Maybe we have a third
480 // instruction.
481 uint32_t insn = insn_at(insn_addr, 0);
482 uint32_t insn3 = insn_at(insn_addr, 2);
483 ptrdiff_t byte_offset;
484 if (offset_for(insn, insn3, byte_offset)) {
485 target += byte_offset;
486 return 3;
487 } else {
488 return 2;
489 }
490 }
491 virtual int immediate(address insn_addr, address &target) {
492 uint32_t *insns = (uint32_t *)insn_addr;
493 // Metadata pointers are either narrow (32 bits) or wide (48 bits).
494 // We encode narrow ones by setting the upper 16 bits in the first
495 // instruction.
496 if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
497 assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
498 narrowKlass nk = (narrowKlass)((uint32_t(Instruction_aarch64::extract(_insn, 20, 5)) << 16)
499 + uint32_t(Instruction_aarch64::extract(insns[1], 20, 5)));
500 target = (address)CompressedKlassPointers::decode(nk);
501 return 2;
502 }
503 assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
504 // Move wide constant: movz, movk, movk. See movptr().
505 assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
506 assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
507 target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
508 + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
509 + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
510 assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
511 assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
512 return 3;
513 }
  // Decoding has no post-conditions to check; intentionally empty.
  virtual void verify(address insn_addr, address &target) {
  }
516 };
517
518 address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
519 AArch64Decoder decoder(insn_addr, insn);
520 address target;
521 decoder.run(insn_addr, target);
522 return target;
680 JavaThread::frame_anchor_offset()
681 + JavaFrameAnchor::last_Java_pc_offset()));
682
683 set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
684 }
685
// Record the last Java frame, using label L as the last_Java_pc.
// If L is already bound its address is used directly; otherwise the
// current pc is emitted as a placeholder and a patch site is registered
// so the real address is filled in when L is eventually bound.
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}
698
699 static inline bool target_needs_far_branch(address addr) {
700 if (SCCache::is_on_for_write()) {
701 return true;
702 }
703 // codecache size <= 128M
704 if (!MacroAssembler::far_branches()) {
705 return false;
706 }
707 // codecache size > 240M
708 if (MacroAssembler::codestub_branch_needs_far_jump()) {
709 return true;
710 }
711 // codecache size: 128M..240M
712 return !CodeCache::is_non_nmethod(addr);
713 }
714
715 void MacroAssembler::far_call(Address entry, Register tmp) {
716 assert(ReservedCodeCacheSize < 4*G, "branch out of range");
717 assert(CodeCache::find_blob(entry.target()) != nullptr,
718 "destination of far call not found in code cache");
719 assert(entry.rspec().type() == relocInfo::external_word_type
720 || entry.rspec().type() == relocInfo::runtime_call_type
721 || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
722 if (target_needs_far_branch(entry.target())) {
3019
3020 #ifdef ASSERT
3021 {
3022 STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10);
3023 Label valid_global_tag;
3024 tbnz(value, 1, valid_global_tag); // Test for global tag
3025 stop("non global jobject using resolve_global_jobject");
3026 bind(valid_global_tag);
3027 }
3028 #endif
3029
3030 // Resolve global handle
3031 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
3032 verify_oop(value);
3033
3034 bind(done);
3035 }
3036
// Halt execution with a message: load the message address into c_rarg0 and
// emit a dcps1 trap (code 0xdeae).
void MacroAssembler::stop(const char* msg) {
  BLOCK_COMMENT(msg);
  // load msg into r0 so we can access it from the signal handler
  // ExternalAddress enables saving and restoring via the code cache
  lea(c_rarg0, ExternalAddress((address) msg));
  dcps1(0xdeae);
  // Register the string with the SCCache so it can be persisted alongside
  // the generated code.
  SCCache::add_C_string(msg);
}
3045
3046 void MacroAssembler::unimplemented(const char* what) {
3047 const char* buf = nullptr;
3048 {
3049 ResourceMark rm;
3050 stringStream ss;
3051 ss.print("unimplemented: %s", what);
3052 buf = code_string(ss.as_string());
3053 }
3054 stop(buf);
3055 }
3056
3057 void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) {
3058 #ifdef ASSERT
3059 Label OK;
3060 br(cc, OK);
3061 stop(msg);
3062 bind(OK);
3063 #endif
3121
3122 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
3123 if (decrement.is_register()) {
3124 sub(Rd, Rn, decrement.as_register());
3125 } else {
3126 sub(Rd, Rn, decrement.as_constant());
3127 }
3128 }
3129
3130 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
3131 if (decrement.is_register()) {
3132 subw(Rd, Rn, decrement.as_register());
3133 } else {
3134 subw(Rd, Rn, decrement.as_constant());
3135 }
3136 }
3137
// Reload rheapbase with the compressed-oops base.  Once the universe is
// fully initialized — and we are not writing to the SCCache, whose code
// must not bake in the current base value — the base is moved in as an
// immediate; otherwise it is re-read indirectly through ptrs_base_addr.
// No-op unless compressed oops are in use.
void MacroAssembler::reinit_heapbase()
{
  if (UseCompressedOops) {
    if (Universe::is_fully_initialized() && !SCCache::is_on_for_write()) {
      mov(rheapbase, CompressedOops::ptrs_base());
    } else {
      lea(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr()));
      ldr(rheapbase, Address(rheapbase));
    }
  }
}
3149
3150 // this simulates the behaviour of the x86 cmpxchg instruction using a
3151 // load linked/store conditional pair. we use the acquire/release
3152 // versions of these instructions so that we flush pending writes as
3153 // per Java semantics.
3154
3155 // n.b the x86 version assumes the old value to be compared against is
3156 // in rax and updates rax with the value located in memory if the
3157 // cmpxchg fails. we supply a register for the old value explicitly
3158
3159 // the aarch64 load linked/store conditional instructions do not
3160 // accept an offset. so, unlike x86, we must provide a plain register
3161 // to identify the memory word to be compared/exchanged rather than a
5424 // the code cache so that if it is relocated we know it will still reach
5425 if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
5426 _adrp(reg1, dest.target());
5427 } else {
5428 uint64_t target = (uint64_t)dest.target();
5429 uint64_t adrp_target
5430 = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);
5431
5432 _adrp(reg1, (address)adrp_target);
5433 movk(reg1, target >> 32, 32);
5434 }
5435 byte_offset = (uint64_t)dest.target() & 0xfff;
5436 }
5437
// Materialize the card table's byte_map_base in `reg`.
void MacroAssembler::load_byte_map_base(Register reg) {
  CardTable::CardValue* byte_map_base =
    ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();

  // Strictly speaking the byte_map_base isn't an address at all, and it might
  // even be negative. It is thus materialised as a constant.
  if (SCCache::is_on_for_write()) {
    // SCA needs relocation info for card table base
    lea(reg, ExternalAddress(reinterpret_cast<address>(byte_map_base)));
  } else {
    mov(reg, (uint64_t)byte_map_base);
  }
}
5451
5452 void MacroAssembler::build_frame(int framesize) {
5453 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
5454 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
5455 protect_return_address();
5456 if (framesize < ((1 << 9) + 2 * wordSize)) {
5457 sub(sp, sp, framesize);
5458 stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
5459 if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
5460 } else {
5461 stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
5462 if (PreserveFramePointer) mov(rfp, sp);
5463 if (framesize < ((1 << 12) + 2 * wordSize))
5464 sub(sp, sp, framesize - 2 * wordSize);
5465 else {
5466 mov(rscratch1, framesize - 2 * wordSize);
5467 sub(sp, sp, rscratch1);
5468 }
5469 }
|