 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

26 #include "precompiled.hpp"
27 #include "asm/assembler.hpp"
28 #include "asm/assembler.inline.hpp"
29 #include "ci/ciEnv.hpp"
30 #include "ci/ciUtilities.hpp"
31 #include "code/compiledIC.hpp"
32 #if INCLUDE_CDS
33 #include "code/SCCache.hpp"
34 #endif
35 #include "compiler/compileTask.hpp"
36 #include "compiler/disassembler.hpp"
37 #include "compiler/oopMap.hpp"
38 #include "gc/shared/barrierSet.hpp"
39 #include "gc/shared/barrierSetAssembler.hpp"
40 #include "gc/shared/cardTableBarrierSet.hpp"
41 #include "gc/shared/cardTable.hpp"
42 #include "gc/shared/collectedHeap.hpp"
43 #include "gc/shared/tlab_globals.hpp"
44 #include "interpreter/bytecodeHistogram.hpp"
45 #include "interpreter/interpreter.hpp"
46 #include "jvm.h"
47 #include "memory/resourceArea.hpp"
48 #include "memory/universe.hpp"
49 #include "nativeInst_aarch64.hpp"
50 #include "oops/accessDecorators.hpp"
51 #include "oops/compressedKlass.inline.hpp"
52 #include "oops/compressedOops.inline.hpp"
53 #include "oops/klass.inline.hpp"
54 #include "runtime/continuation.hpp"
// ... (code elided) ...

    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
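    // The load/store size field (bits 31:30) encodes the operand size as a
    // power of two; the unsigned-offset form scales its 12-bit immediate by
    // that size, so the low offset bits are shifted out before patching.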
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
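    // The movk supplies bits 47:32 of the destination. Hand back an adjusted
    // target that keeps the low 32 bits but takes bits 47:32 from the patch
    // site, so the adrp preceding the movk is patched with a page it can reach.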
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  virtual int immediate(address insn_addr, address &target) {
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      narrowKlass nk = CompressedKlassPointers::encode((Klass*)target);
      Instruction_aarch64::patch(insn_addr, 20, 5, nk >> 16);
      Instruction_aarch64::patch(insn_addr+4, 20, 5, nk & 0xffff);
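      // Worked example with a hypothetical value: for nk == 0x00123456 the
      // first instruction's 16-bit immediate field becomes 0x0012 and the
      // trailing movk's immediate field becomes 0x3456.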
      return 2;
    }
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
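    // The 48-bit destination is spread across three 16-bit immediate fields:
    // the movz takes bits 15:0, the movks take bits 31:16 and 47:32.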
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (address_is != target) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }
// ... (code elided) ...

    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
           ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    } else {
      return 2;
    }
  }
  virtual int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
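      // Reassemble the 32-bit narrowKlass from the two 16-bit immediate
      // fields (upper half in the first instruction, lower half in the
      // movk), then decode it back to a Klass pointer.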
      narrowKlass nk = (narrowKlass)((uint32_t(Instruction_aarch64::extract(_insn, 20, 5)) << 16)
                                     + uint32_t(Instruction_aarch64::extract(insns[1], 20, 5)));
      target = (address)CompressedKlassPointers::decode(nk);
      return 2;
    }
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
                     + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                     + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
  }
};

address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
  AArch64Decoder decoder(insn_addr, insn);
  address target;
  decoder.run(insn_addr, target);
  return target;
// ... (code elided) ...

                         JavaThread::frame_anchor_offset()
                         + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
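    // The label is not bound yet: record a patch site at the current
    // location and emit the current pc as a placeholder last_Java_pc.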
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}

static inline bool target_needs_far_branch(address addr) {
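  // Code generated for the startup code cache may be loaded at a different
  // address later, so conservatively assume a far branch is needed.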
  if (SCCache::is_on_for_write()) {
    return true;
  }
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}

void MacroAssembler::far_call(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  if (target_needs_far_branch(entry.target())) {
// ... (code elided) ...

    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Check whether the entry target is always reachable from any branch.
static bool is_always_within_branch_range(Address entry) {
  if (SCCache::is_on_for_write()) {
    return false;
  }
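  // Code written to the startup code cache may be loaded elsewhere,
  // so direct reachability of the target cannot be assumed.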
  const address target = entry.target();

  if (!CodeCache::contains(target)) {
    // We always use trampolines for callees outside CodeCache.
    assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
    return false;
  }

  if (!MacroAssembler::far_branches()) {
    return true;
  }

  if (entry.rspec().type() == relocInfo::runtime_call_type) {
    // Runtime calls are calls of a non-compiled method (stubs, adapters).
    // Non-compiled methods stay forever in CodeCache.
    // We check whether the longest possible branch is within the branch range.
    assert(CodeCache::find_blob(target) != nullptr &&
           !CodeCache::find_blob(target)->is_nmethod(),
           "runtime call of compiled method");
    const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
// ... (code elided) ...

#ifdef ASSERT
  {
    STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10);
    Label valid_global_tag;
    tbnz(value, 1, valid_global_tag); // Test for global tag
    stop("non-global jobject passed to resolve_global_jobject");
    bind(valid_global_tag);
  }
#endif

  // Resolve global handle
  access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
  verify_oop(value);

  bind(done);
}

void MacroAssembler::stop(const char* msg) {
  BLOCK_COMMENT(msg);
  // Load msg into r0 so we can access it from the signal handler.
  // ExternalAddress enables saving and restoring the string via the code cache.
  lea(c_rarg0, ExternalAddress((address) msg));
  dcps1(0xdeae);
  SCCache::add_C_string(msg);
}

void MacroAssembler::unimplemented(const char* what) {
  const char* buf = nullptr;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("unimplemented: %s", what);
    buf = code_string(ss.as_string());
  }
  stop(buf);
}

void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) {
#ifdef ASSERT
  Label OK;
  br(cc, OK);
  stop(msg);
  bind(OK);
#endif
// ... (code elided) ...

void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
  if (decrement.is_register()) {
    sub(Rd, Rn, decrement.as_register());
  } else {
    sub(Rd, Rn, decrement.as_constant());
  }
}

void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
  if (decrement.is_register()) {
    subw(Rd, Rn, decrement.as_register());
  } else {
    subw(Rd, Rn, decrement.as_constant());
  }
}

void MacroAssembler::reinit_heapbase()
{
  if (UseCompressedOops) {
    if (Universe::is_fully_initialized() && !SCCache::is_on_for_write()) {
      mov(rheapbase, CompressedOops::base());
    } else {
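      // The heap base is not a constant here: either the heap is not fully
      // initialized yet, or this code is headed for the startup code cache
      // and must not bake in the current value, so load it indirectly.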
      lea(rheapbase, ExternalAddress(CompressedOops::base_addr()));
      ldr(rheapbase, Address(rheapbase));
    }
  }
}

// this simulates the behaviour of the x86 cmpxchg instruction using a
// load linked/store conditional pair. we use the acquire/release
// versions of these instructions so that we flush pending writes as
// per Java semantics.

// n.b. the x86 version assumes the old value to be compared against is
// in rax and updates rax with the value located in memory if the
// cmpxchg fails. we supply a register for the old value explicitly.

// the aarch64 load linked/store conditional instructions do not
// accept an offset. so, unlike x86, we must provide a plain register
// to identify the memory word to be compared/exchanged rather than a
// ... (code elided) ...

  // the code cache so that if it is relocated we know it will still reach
  if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
    _adrp(reg1, dest.target());
  } else {
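    // Outside adrp's +/-4G range: synthesize an address that keeps the low
    // 32 bits of the real target but shares bits 47:32 with the current pc,
    // so adrp can reach it; the movk then installs the real bits 47:32.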
    uint64_t target = (uint64_t)dest.target();
    uint64_t adrp_target
      = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);

    _adrp(reg1, (address)adrp_target);
    movk(reg1, target >> 32, 32);
  }
  byte_offset = (uint64_t)dest.target() & 0xfff;
}

void MacroAssembler::load_byte_map_base(Register reg) {
  CardTable::CardValue* byte_map_base =
    ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();

  // Strictly speaking the byte_map_base isn't an address at all, and it might
  // even be negative. It is thus materialised as a constant.
#if INCLUDE_CDS
  if (SCCache::is_on_for_write()) {
    // SCA needs relocation info for card table base
    lea(reg, ExternalAddress(reinterpret_cast<address>(byte_map_base)));
  } else {
#endif
    mov(reg, (uint64_t)byte_map_base);
#if INCLUDE_CDS
  }
#endif
}

void MacroAssembler::load_aotrc_address(Register reg, address a) {
#if INCLUDE_CDS
  assert(AOTRuntimeConstants::contains(a), "address out of range for data area");
  if (SCCache::is_on_for_write()) {
    // all aotrc field addresses should be registered in the SCC address table
    lea(reg, ExternalAddress(a));
  } else {
    mov(reg, (uint64_t)a);
  }
#else
  ShouldNotReachHere();
#endif
}

void MacroAssembler::build_frame(int framesize) {
  assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
  assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
  protect_return_address();
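  // Three size regimes: small frames allocate and store FP/LR with a single
  // sub plus an stp at an immediate offset; larger frames push FP/LR first
  // and then drop sp, going through a scratch register once the frame no
  // longer fits sub's 12-bit immediate.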
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    sub(sp, sp, framesize);
    stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
  } else {
    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
    if (PreserveFramePointer) mov(rfp, sp);
    if (framesize < ((1 << 12) + 2 * wordSize)) {
      sub(sp, sp, framesize - 2 * wordSize);
    } else {
      mov(rscratch1, framesize - 2 * wordSize);
      sub(sp, sp, rscratch1);
    }
  }