10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/assembler.hpp"
28 #include "asm/assembler.inline.hpp"
29 #include "ci/ciEnv.hpp"
30 #include "code/compiledIC.hpp"
31 #include "compiler/compileTask.hpp"
32 #include "compiler/disassembler.hpp"
33 #include "compiler/oopMap.hpp"
34 #include "gc/shared/barrierSet.hpp"
35 #include "gc/shared/barrierSetAssembler.hpp"
36 #include "gc/shared/cardTableBarrierSet.hpp"
37 #include "gc/shared/cardTable.hpp"
38 #include "gc/shared/collectedHeap.hpp"
39 #include "gc/shared/tlab_globals.hpp"
40 #include "interpreter/bytecodeHistogram.hpp"
41 #include "interpreter/interpreter.hpp"
42 #include "jvm.h"
43 #include "memory/resourceArea.hpp"
44 #include "memory/universe.hpp"
45 #include "nativeInst_aarch64.hpp"
46 #include "oops/accessDecorators.hpp"
47 #include "oops/compressedKlass.inline.hpp"
48 #include "oops/compressedOops.inline.hpp"
49 #include "oops/klass.inline.hpp"
331 uint32_t insn2 = insn_at(insn_addr, 1);
332 uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
333 Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
334 guarantee(((dest >> size) << size) == dest, "misaligned target");
335 return 2;
336 }
  // Patch the add that follows an adrp: fill its imm12 field
  // (bits 21..10 of the second instruction) with the low 12 bits of
  // target, i.e. the offset within the 4k page the adrp selects.
  // Returns 2, the number of instructions in this pattern.
  static int adrpAdd_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;  // page offset of the target
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  // Patch the movk that follows an adrp: its imm16 field
  // (bits 20..5 of the second instruction) receives bits 47..32 of the
  // target.  The reported target is then rewritten so that its bits
  // 47..32 come from insn_addr instead -- presumably so the caller's
  // subsequent adrp patching computes a page delta relative to the
  // same 4G region as the instruction (TODO confirm against caller).
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  // Patch a 48-bit constant materialised by a movz/movk/movk sequence
  // (see movptr()): each instruction carries 16 bits of the constant in
  // its imm16 field (bits 20..5), least-significant half-word first.
  // Returns 3, the number of instructions patched.
  virtual int immediate(address insn_addr, address &target) {
    // First insn must be a 64-bit movz with hw == 0.
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    // Note: `dest >>= 16` mutates dest between the two patches below,
    // so evaluation order matters here.
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
  // Debug-only sanity check: decode the just-patched instruction(s)
  // and make sure they now resolve to `target`.
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);  // dump a disassembly before asserting
      assert(address_is == target, "should be");
    }
#endif
  }
461 uint32_t insn2 = insn_at(insn_addr, 1);
462 uint64_t dest = uint64_t(target);
463 dest = (dest & 0xffff0000ffffffff) |
464 ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
465 target = address(dest);
466
467 // We know the destination 4k page. Maybe we have a third
468 // instruction.
469 uint32_t insn = insn_at(insn_addr, 0);
470 uint32_t insn3 = insn_at(insn_addr, 2);
471 ptrdiff_t byte_offset;
472 if (offset_for(insn, insn3, byte_offset)) {
473 target += byte_offset;
474 return 3;
475 } else {
476 return 2;
477 }
478 }
  // Decode a 48-bit constant materialised by a movz/movk/movk sequence
  // (see movptr()): 16 bits from each instruction's imm16 field
  // (bits 20..5), least-significant half-word first.  Returns 3, the
  // number of instructions consumed.
  virtual int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    // First insn must be a 64-bit movz with hw == 0.
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
                 + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                 + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    return 3;
  }
  // Nothing to verify when merely decoding: no instruction was patched.
  virtual void verify(address insn_addr, address &target) {
  }
494 };
495
496 address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
497 AArch64Decoder decoder(insn_addr, insn);
498 address target;
499 decoder.run(insn_addr, target);
500 return target;
658 JavaThread::frame_anchor_offset()
659 + JavaFrameAnchor::last_Java_pc_offset()));
660
661 set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
662 }
663
// Set up the last_Java_frame anchor, taking the PC from a Label.  If
// the label is already bound its resolved address is used directly;
// otherwise a patch site is recorded and the current pc() is emitted
// as a placeholder, fixed up when the label is bound.
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}
676
677 static inline bool target_needs_far_branch(address addr) {
678 // codecache size <= 128M
679 if (!MacroAssembler::far_branches()) {
680 return false;
681 }
682 // codecache size > 240M
683 if (MacroAssembler::codestub_branch_needs_far_jump()) {
684 return true;
685 }
686 // codecache size: 128M..240M
687 return !CodeCache::is_non_nmethod(addr);
688 }
689
690 void MacroAssembler::far_call(Address entry, Register tmp) {
691 assert(ReservedCodeCacheSize < 4*G, "branch out of range");
692 assert(CodeCache::find_blob(entry.target()) != nullptr,
693 "destination of far call not found in code cache");
694 assert(entry.rspec().type() == relocInfo::external_word_type
695 || entry.rspec().type() == relocInfo::runtime_call_type
696 || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
697 if (target_needs_far_branch(entry.target())) {
833 ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
834 Label ok;
835 cbz(rscratch1, ok);
836 lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
837 br(rscratch1);
838 bind(ok);
839 }
840
841 // get oop result if there is one and reset the value in the thread
842 if (oop_result->is_valid()) {
843 get_vm_result(oop_result, java_thread);
844 }
845 }
846
// Convenience wrapper around call_VM_base, passing noreg for both the
// java_thread and last_java_sp registers (call_VM_base presumably
// substitutes its defaults for noreg -- see its definition).
void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}
850
851 // Check the entry target is always reachable from any branch.
852 static bool is_always_within_branch_range(Address entry) {
853 const address target = entry.target();
854
855 if (!CodeCache::contains(target)) {
856 // We always use trampolines for callees outside CodeCache.
857 assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
858 return false;
859 }
860
861 if (!MacroAssembler::far_branches()) {
862 return true;
863 }
864
865 if (entry.rspec().type() == relocInfo::runtime_call_type) {
866 // Runtime calls are calls of a non-compiled method (stubs, adapters).
867 // Non-compiled methods stay forever in CodeCache.
868 // We check whether the longest possible branch is within the branch range.
869 assert(CodeCache::find_blob(target) != nullptr &&
870 !CodeCache::find_blob(target)->is_nmethod(),
871 "runtime call of compiled method");
872 const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
3016
3017 #ifdef ASSERT
3018 {
3019 STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10);
3020 Label valid_global_tag;
3021 tbnz(value, 1, valid_global_tag); // Test for global tag
3022 stop("non global jobject using resolve_global_jobject");
3023 bind(valid_global_tag);
3024 }
3025 #endif
3026
3027 // Resolve global handle
3028 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
3029 verify_oop(value);
3030
3031 bind(done);
3032 }
3033
// Halt execution with a diagnostic message: emit a dcps1 debug trap
// with the magic code 0xdeae, followed by the message pointer embedded
// in the instruction stream -- presumably read back by the trap
// handler from the trapping pc (TODO confirm against the handler).
void MacroAssembler::stop(const char* msg) {
  BLOCK_COMMENT(msg);
  dcps1(0xdeae);
  emit_int64((uintptr_t)msg);
}
3039
3040 void MacroAssembler::unimplemented(const char* what) {
3041 const char* buf = nullptr;
3042 {
3043 ResourceMark rm;
3044 stringStream ss;
3045 ss.print("unimplemented: %s", what);
3046 buf = code_string(ss.as_string());
3047 }
3048 stop(buf);
3049 }
3050
3051 void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) {
3052 #ifdef ASSERT
3053 Label OK;
3054 br(cc, OK);
3055 stop(msg);
3056 bind(OK);
3057 #endif
3115
3116 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
3117 if (decrement.is_register()) {
3118 sub(Rd, Rn, decrement.as_register());
3119 } else {
3120 sub(Rd, Rn, decrement.as_constant());
3121 }
3122 }
3123
3124 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
3125 if (decrement.is_register()) {
3126 subw(Rd, Rn, decrement.as_register());
3127 } else {
3128 subw(Rd, Rn, decrement.as_constant());
3129 }
3130 }
3131
// Reload rheapbase with the compressed-oops base.  Once the universe
// is fully initialized the base is a fixed value and can be moved as a
// constant; during startup it must be loaded indirectly because it may
// still change.
void MacroAssembler::reinit_heapbase()
{
  if (UseCompressedOops) {
    if (Universe::is_fully_initialized()) {
      mov(rheapbase, CompressedOops::ptrs_base());
    } else {
      lea(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr()));
      ldr(rheapbase, Address(rheapbase));
    }
  }
}
3143
3144 // this simulates the behaviour of the x86 cmpxchg instruction using a
3145 // load linked/store conditional pair. we use the acquire/release
3146 // versions of these instructions so that we flush pending writes as
3147 // per Java semantics.
3148
3149 // n.b the x86 version assumes the old value to be compared against is
3150 // in rax and updates rax with the value located in memory if the
3151 // cmpxchg fails. we supply a register for the old value explicitly
3152
3153 // the aarch64 load linked/store conditional instructions do not
3154 // accept an offset. so, unlike x86, we must provide a plain register
3155 // to identify the memory word to be compared/exchanged rather than a
5418 // the code cache so that if it is relocated we know it will still reach
5419 if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
5420 _adrp(reg1, dest.target());
5421 } else {
5422 uint64_t target = (uint64_t)dest.target();
5423 uint64_t adrp_target
5424 = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);
5425
5426 _adrp(reg1, (address)adrp_target);
5427 movk(reg1, target >> 32, 32);
5428 }
5429 byte_offset = (uint64_t)dest.target() & 0xfff;
5430 }
5431
5432 void MacroAssembler::load_byte_map_base(Register reg) {
5433 CardTable::CardValue* byte_map_base =
5434 ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();
5435
5436 // Strictly speaking the byte_map_base isn't an address at all, and it might
5437 // even be negative. It is thus materialised as a constant.
5438 mov(reg, (uint64_t)byte_map_base);
5439 }
5440
5441 void MacroAssembler::build_frame(int framesize) {
5442 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
5443 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
5444 protect_return_address();
5445 if (framesize < ((1 << 9) + 2 * wordSize)) {
5446 sub(sp, sp, framesize);
5447 stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
5448 if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
5449 } else {
5450 stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
5451 if (PreserveFramePointer) mov(rfp, sp);
5452 if (framesize < ((1 << 12) + 2 * wordSize))
5453 sub(sp, sp, framesize - 2 * wordSize);
5454 else {
5455 mov(rscratch1, framesize - 2 * wordSize);
5456 sub(sp, sp, rscratch1);
5457 }
5458 }
|
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/assembler.hpp"
28 #include "asm/assembler.inline.hpp"
29 #include "ci/ciEnv.hpp"
30 #include "ci/ciUtilities.hpp"
31 #include "code/compiledIC.hpp"
32 #include "compiler/compileTask.hpp"
33 #include "compiler/disassembler.hpp"
34 #include "compiler/oopMap.hpp"
35 #include "gc/shared/barrierSet.hpp"
36 #include "gc/shared/barrierSetAssembler.hpp"
37 #include "gc/shared/cardTableBarrierSet.hpp"
38 #include "gc/shared/cardTable.hpp"
39 #include "gc/shared/collectedHeap.hpp"
40 #include "gc/shared/tlab_globals.hpp"
41 #include "interpreter/bytecodeHistogram.hpp"
42 #include "interpreter/interpreter.hpp"
43 #include "jvm.h"
44 #include "memory/resourceArea.hpp"
45 #include "memory/universe.hpp"
46 #include "nativeInst_aarch64.hpp"
47 #include "oops/accessDecorators.hpp"
48 #include "oops/compressedKlass.inline.hpp"
49 #include "oops/compressedOops.inline.hpp"
50 #include "oops/klass.inline.hpp"
332 uint32_t insn2 = insn_at(insn_addr, 1);
333 uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
334 Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
335 guarantee(((dest >> size) << size) == dest, "misaligned target");
336 return 2;
337 }
  // Patch the add that follows an adrp: fill its imm12 field
  // (bits 21..10 of the second instruction) with the low 12 bits of
  // target, i.e. the offset within the 4k page the adrp selects.
  // Returns 2, the number of instructions in this pattern.
  static int adrpAdd_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;  // page offset of the target
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  // Patch the movk that follows an adrp: its imm16 field
  // (bits 20..5 of the second instruction) receives bits 47..32 of the
  // target.  The reported target is then rewritten so that its bits
  // 47..32 come from insn_addr instead -- presumably so the caller's
  // subsequent adrp patching computes a page delta relative to the
  // same 4G region as the instruction (TODO confirm against caller).
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  // Patch a metadata/pointer constant materialised by a move-wide
  // sequence.  Narrow (32-bit) metadata pointers use a 2-insn
  // movz/movk pair; everything else uses a 3-insn movz/movk/movk
  // sequence carrying 48 bits, 16 per instruction (see movptr()).
  // Returns the number of instructions patched (2 or 3).
  virtual int immediate(address insn_addr, address &target) {
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
      // movz with hw == 1: narrow klass, high half-word first.
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      narrowKlass nk = CompressedKlassPointers::encode((Klass*)target);
      Instruction_aarch64::patch(insn_addr, 20, 5, nk >> 16);
      Instruction_aarch64::patch(insn_addr+4, 20, 5, nk & 0xffff);
      return 2;
    }
    // Otherwise a 64-bit movz with hw == 0, low half-word first.
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    // Note: `dest >>= 16` mutates dest between the two patches below,
    // so evaluation order matters here.
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
  // Debug-only sanity check: decode the just-patched instruction(s)
  // and make sure they now resolve to `target`.
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);  // dump a disassembly before asserting
      assert(address_is == target, "should be");
    }
#endif
  }
472 uint32_t insn2 = insn_at(insn_addr, 1);
473 uint64_t dest = uint64_t(target);
474 dest = (dest & 0xffff0000ffffffff) |
475 ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
476 target = address(dest);
477
478 // We know the destination 4k page. Maybe we have a third
479 // instruction.
480 uint32_t insn = insn_at(insn_addr, 0);
481 uint32_t insn3 = insn_at(insn_addr, 2);
482 ptrdiff_t byte_offset;
483 if (offset_for(insn, insn3, byte_offset)) {
484 target += byte_offset;
485 return 3;
486 } else {
487 return 2;
488 }
489 }
  // Decode a constant materialised by a move-wide sequence.  Narrow
  // (32-bit) metadata pointers use a 2-insn movz/movk pair and are
  // decoded through CompressedKlassPointers; everything else is a
  // 48-bit movz/movk/movk sequence (see movptr()).  Returns the number
  // of instructions consumed (2 or 3).
  virtual int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
      // movz with hw == 1: narrow klass, high half-word first.
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      narrowKlass nk = (narrowKlass)((uint32_t(Instruction_aarch64::extract(_insn, 20, 5)) << 16)
                                     + uint32_t(Instruction_aarch64::extract(insns[1], 20, 5)));
      target = (address)CompressedKlassPointers::decode(nk);
      return 2;
    }
    // Otherwise a 64-bit movz with hw == 0, low half-word first.
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
                 + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                 + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    return 3;
  }
  // Nothing to verify when merely decoding: no instruction was patched.
  virtual void verify(address insn_addr, address &target) {
  }
515 };
516
517 address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
518 AArch64Decoder decoder(insn_addr, insn);
519 address target;
520 decoder.run(insn_addr, target);
521 return target;
679 JavaThread::frame_anchor_offset()
680 + JavaFrameAnchor::last_Java_pc_offset()));
681
682 set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
683 }
684
// Set up the last_Java_frame anchor, taking the PC from a Label.  If
// the label is already bound its resolved address is used directly;
// otherwise a patch site is recorded and the current pc() is emitted
// as a placeholder, fixed up when the label is bound.
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}
697
698 static inline bool target_needs_far_branch(address addr) {
699 if (SCCache::is_on_for_write()) {
700 return true;
701 }
702 // codecache size <= 128M
703 if (!MacroAssembler::far_branches()) {
704 return false;
705 }
706 // codecache size > 240M
707 if (MacroAssembler::codestub_branch_needs_far_jump()) {
708 return true;
709 }
710 // codecache size: 128M..240M
711 return !CodeCache::is_non_nmethod(addr);
712 }
713
714 void MacroAssembler::far_call(Address entry, Register tmp) {
715 assert(ReservedCodeCacheSize < 4*G, "branch out of range");
716 assert(CodeCache::find_blob(entry.target()) != nullptr,
717 "destination of far call not found in code cache");
718 assert(entry.rspec().type() == relocInfo::external_word_type
719 || entry.rspec().type() == relocInfo::runtime_call_type
720 || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
721 if (target_needs_far_branch(entry.target())) {
857 ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
858 Label ok;
859 cbz(rscratch1, ok);
860 lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
861 br(rscratch1);
862 bind(ok);
863 }
864
865 // get oop result if there is one and reset the value in the thread
866 if (oop_result->is_valid()) {
867 get_vm_result(oop_result, java_thread);
868 }
869 }
870
// Convenience wrapper around call_VM_base, passing noreg for both the
// java_thread and last_java_sp registers (call_VM_base presumably
// substitutes its defaults for noreg -- see its definition).
void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}
874
875 // Check the entry target is always reachable from any branch.
876 static bool is_always_within_branch_range(Address entry) {
877 if (SCCache::is_on_for_write()) {
878 return false;
879 }
880 const address target = entry.target();
881
882 if (!CodeCache::contains(target)) {
883 // We always use trampolines for callees outside CodeCache.
884 assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
885 return false;
886 }
887
888 if (!MacroAssembler::far_branches()) {
889 return true;
890 }
891
892 if (entry.rspec().type() == relocInfo::runtime_call_type) {
893 // Runtime calls are calls of a non-compiled method (stubs, adapters).
894 // Non-compiled methods stay forever in CodeCache.
895 // We check whether the longest possible branch is within the branch range.
896 assert(CodeCache::find_blob(target) != nullptr &&
897 !CodeCache::find_blob(target)->is_nmethod(),
898 "runtime call of compiled method");
899 const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
3043
3044 #ifdef ASSERT
3045 {
3046 STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10);
3047 Label valid_global_tag;
3048 tbnz(value, 1, valid_global_tag); // Test for global tag
3049 stop("non global jobject using resolve_global_jobject");
3050 bind(valid_global_tag);
3051 }
3052 #endif
3053
3054 // Resolve global handle
3055 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
3056 verify_oop(value);
3057
3058 bind(done);
3059 }
3060
// Halt execution with a diagnostic message: place the message address
// in c_rarg0, then emit a dcps1 debug trap with the magic code 0xdeae.
void MacroAssembler::stop(const char* msg) {
  BLOCK_COMMENT(msg);
  // load msg into r0 so we can access it from the signal handler
  // ExternalAddress enables saving and restoring via the code cache
  lea(c_rarg0, ExternalAddress((address) msg));
  dcps1(0xdeae);
  // Register the string with the startup code cache so it survives a
  // save/restore cycle of the generated code.
  SCCache::add_C_string(msg);
}
3069
3070 void MacroAssembler::unimplemented(const char* what) {
3071 const char* buf = nullptr;
3072 {
3073 ResourceMark rm;
3074 stringStream ss;
3075 ss.print("unimplemented: %s", what);
3076 buf = code_string(ss.as_string());
3077 }
3078 stop(buf);
3079 }
3080
3081 void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) {
3082 #ifdef ASSERT
3083 Label OK;
3084 br(cc, OK);
3085 stop(msg);
3086 bind(OK);
3087 #endif
3145
3146 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
3147 if (decrement.is_register()) {
3148 sub(Rd, Rn, decrement.as_register());
3149 } else {
3150 sub(Rd, Rn, decrement.as_constant());
3151 }
3152 }
3153
3154 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
3155 if (decrement.is_register()) {
3156 subw(Rd, Rn, decrement.as_register());
3157 } else {
3158 subw(Rd, Rn, decrement.as_constant());
3159 }
3160 }
3161
// Reload rheapbase with the compressed-oops base.  Once the universe
// is fully initialized the base is a fixed value and can be moved as a
// constant -- except when writing the startup code cache, where the
// indirect load keeps the generated code relocatable.  During startup
// it must likewise be loaded indirectly because it may still change.
void MacroAssembler::reinit_heapbase()
{
  if (UseCompressedOops) {
    if (Universe::is_fully_initialized() && !SCCache::is_on_for_write()) {
      mov(rheapbase, CompressedOops::ptrs_base());
    } else {
      lea(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr()));
      ldr(rheapbase, Address(rheapbase));
    }
  }
}
3173
3174 // this simulates the behaviour of the x86 cmpxchg instruction using a
3175 // load linked/store conditional pair. we use the acquire/release
3176 // versions of these instructions so that we flush pending writes as
3177 // per Java semantics.
3178
3179 // n.b the x86 version assumes the old value to be compared against is
3180 // in rax and updates rax with the value located in memory if the
3181 // cmpxchg fails. we supply a register for the old value explicitly
3182
3183 // the aarch64 load linked/store conditional instructions do not
3184 // accept an offset. so, unlike x86, we must provide a plain register
3185 // to identify the memory word to be compared/exchanged rather than a
5448 // the code cache so that if it is relocated we know it will still reach
5449 if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
5450 _adrp(reg1, dest.target());
5451 } else {
5452 uint64_t target = (uint64_t)dest.target();
5453 uint64_t adrp_target
5454 = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);
5455
5456 _adrp(reg1, (address)adrp_target);
5457 movk(reg1, target >> 32, 32);
5458 }
5459 byte_offset = (uint64_t)dest.target() & 0xfff;
5460 }
5461
// Materialise the card table's byte_map_base into reg.
void MacroAssembler::load_byte_map_base(Register reg) {
  CardTable::CardValue* byte_map_base =
    ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();

  // Strictly speaking the byte_map_base isn't an address at all, and it might
  // even be negative. It is thus materialised as a constant.
  if (SCCache::is_on_for_write()) {
    // SCA needs relocation info for card table base
    lea(reg, ExternalAddress(reinterpret_cast<address>(byte_map_base)));
  } else {
    mov(reg, (uint64_t)byte_map_base);
  }
}
5475
5476 void MacroAssembler::build_frame(int framesize) {
5477 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
5478 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
5479 protect_return_address();
5480 if (framesize < ((1 << 9) + 2 * wordSize)) {
5481 sub(sp, sp, framesize);
5482 stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
5483 if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
5484 } else {
5485 stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
5486 if (PreserveFramePointer) mov(rfp, sp);
5487 if (framesize < ((1 << 12) + 2 * wordSize))
5488 sub(sp, sp, framesize - 2 * wordSize);
5489 else {
5490 mov(rscratch1, framesize - 2 * wordSize);
5491 sub(sp, sp, rscratch1);
5492 }
5493 }
|