 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  virtual int immediate(address insn_addr, address &target) {
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
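  // A minimal sketch of the sequence patched by immediate() above, for a
  // hypothetical 48-bit target 0x0000_1234_5678_9abc (illustration only;
  // the register is whatever movptr() was given):
  //   movz xN, #0x9abc                // bits 15..0
  //   movk xN, #0x5678, lsl #16       // bits 31..16
  //   movk xN, #0x1234, lsl #32       // bits 47..32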
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (address_is != target) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
      ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    } else {
      return 2;
    }
  }
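  // A sketch of the adrp/movk shape decoded above (illustrative only):
  //   adrp xN, <page>                  // reaches the page holding the
  //                                    // low 32 bits of dest
  //   movk xN, #(dest >> 32), lsl #32  // overwrite bits 47..32
  // plus, possibly, a third instruction (an add or a load/store) that
  // carries the low 12 bits of offset within the 4k page.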
  virtual int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
                     + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                     + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
  }
};

address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
  AArch64Decoder decoder(insn_addr, insn);
  address target;
  decoder.run(insn_addr, target);
  return target;
                               JavaThread::frame_anchor_offset()
                               + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}

static inline bool target_needs_far_branch(address addr) {
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}
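
// Illustrative only: a near branch is a single
//   bl <dest>                          // imm26 * 4 gives +/-128MB reach
// while a far branch (as emitted by far_call() below) materializes the
// target first, e.g.
//   adrp tmp, <dest page>
//   add  tmp, tmp, #<page offset>
//   blr  tmp
// trading two extra instructions and a scratch register for full reach.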

void MacroAssembler::far_call(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  if (target_needs_far_branch(entry.target())) {
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Check whether the entry target is always reachable from any branch.
static bool is_always_within_branch_range(Address entry) {
  const address target = entry.target();

  if (!CodeCache::contains(target)) {
    // We always use trampolines for callees outside CodeCache.
    assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
    return false;
  }

  if (!MacroAssembler::far_branches()) {
    return true;
  }

  if (entry.rspec().type() == relocInfo::runtime_call_type) {
    // Runtime calls are calls of a non-compiled method (stubs, adapters).
    // Non-compiled methods stay forever in CodeCache.
    // We check whether the longest possible branch is within the branch range.
    assert(CodeCache::find_blob(target) != nullptr &&
           !CodeCache::find_blob(target)->is_nmethod(),
           "runtime call of compiled method");
    const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;

#ifdef ASSERT
  {
    STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10);
    Label valid_global_tag;
    tbnz(value, 1, valid_global_tag); // Test for global tag
    stop("non global jobject using resolve_global_jobject");
    bind(valid_global_tag);
  }
#endif

  // Resolve global handle
  access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
  verify_oop(value);

  bind(done);
}

void MacroAssembler::stop(const char* msg) {
  BLOCK_COMMENT(msg);
  dcps1(0xdeae);
  // Plant the message address in the instruction stream right after the
  // trap, where the signal handler can retrieve it.
  emit_int64((uintptr_t)msg);
}

void MacroAssembler::unimplemented(const char* what) {
  const char* buf = nullptr;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("unimplemented: %s", what);
    buf = code_string(ss.as_string());
  }
  stop(buf);
}

void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) {
#ifdef ASSERT
  Label OK;
  br(cc, OK);
  stop(msg);
  bind(OK);
#endif

void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
  if (decrement.is_register()) {
    sub(Rd, Rn, decrement.as_register());
  } else {
    sub(Rd, Rn, decrement.as_constant());
  }
}

void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
  if (decrement.is_register()) {
    subw(Rd, Rn, decrement.as_register());
  } else {
    subw(Rd, Rn, decrement.as_constant());
  }
}

void MacroAssembler::reinit_heapbase()
{
  if (UseCompressedOops) {
    if (Universe::is_fully_initialized()) {
      mov(rheapbase, CompressedOops::base());
    } else {
      lea(rheapbase, ExternalAddress(CompressedOops::base_addr()));
      ldr(rheapbase, Address(rheapbase));
    }
  }
}

// this simulates the behaviour of the x86 cmpxchg instruction using a
// load linked/store conditional pair. we use the acquire/release
// versions of these instructions so that we flush pending writes as
// per Java semantics.
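//
// for illustration (not the exact code emitted below), the LL/SC loop
// has the canonical shape:
//   retry:
//     ldaxr   tmp, [addr]             // load-acquire exclusive
//     cmp     tmp, expected
//     b.ne    done                    // compare failed, tmp holds old value
//     stlxr   status, newv, [addr]    // store-release exclusive
//     cbnz    status, retry           // reservation lost, try again
//   done: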

// n.b. the x86 version assumes the old value to be compared against is
// in rax and updates rax with the value located in memory if the
// cmpxchg fails. we supply a register for the old value explicitly

// the aarch64 load linked/store conditional instructions do not
// accept an offset. so, unlike x86, we must provide a plain register
// to identify the memory word to be compared/exchanged rather than a

// the code cache so that if it is relocated we know it will still reach
  if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
    _adrp(reg1, dest.target());
  } else {
    uint64_t target = (uint64_t)dest.target();
    uint64_t adrp_target
      = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);

    _adrp(reg1, (address)adrp_target);
    movk(reg1, target >> 32, 32);
  }
  byte_offset = (uint64_t)dest.target() & 0xfff;
}

void MacroAssembler::load_byte_map_base(Register reg) {
  CardTable::CardValue* byte_map_base =
    ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();

  // Strictly speaking the byte_map_base isn't an address at all, and it might
  // even be negative. It is thus materialised as a constant.
  mov(reg, (uint64_t)byte_map_base);
}
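
// A minimal sketch, for illustration only: a card-table post-barrier then
// dirties a card with something like
//   lsr  tmp, obj, #CardTable::card_shift   // card index for the object
//   strb zr, [byte_map_base_reg, tmp]       // byte_map_base + card index
// so byte_map_base only ever has a card index added to it and is never
// dereferenced on its own.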

void MacroAssembler::build_frame(int framesize) {
  assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
  assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
  protect_return_address();
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    sub(sp, sp, framesize);
    stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
  } else {
    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
    if (PreserveFramePointer) mov(rfp, sp);
    if (framesize < ((1 << 12) + 2 * wordSize))
      sub(sp, sp, framesize - 2 * wordSize);
    else {
      mov(rscratch1, framesize - 2 * wordSize);
      sub(sp, sp, rscratch1);
    }
  }

// ============================================================================
// Updated version of the same file (adds SCCache / AOT code cache support)
// ============================================================================

 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "ci/ciUtilities.hpp"
#include "code/compiledIC.hpp"
#if INCLUDE_CDS
#include "code/SCCache.hpp"
#endif
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  virtual int immediate(address insn_addr, address &target) {
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      narrowKlass nk = CompressedKlassPointers::encode((Klass*)target);
      Instruction_aarch64::patch(insn_addr, 20, 5, nk >> 16);
      Instruction_aarch64::patch(insn_addr+4, 20, 5, nk & 0xffff);
      return 2;
    }
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
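  // A minimal sketch of the two shapes patched above (illustrative only):
  // narrow klass, two instructions (movz with hw=01, i.e. an lsl #16 form,
  // hence the 0b11010010101 opcode check):
  //   movz xN, #(nk >> 16), lsl #16
  //   movk xN, #(nk & 0xffff)
  // wide 48-bit pointer, three instructions as in movptr():
  //   movz xN, #bits 15..0
  //   movk xN, #bits 31..16, lsl #16
  //   movk xN, #bits 47..32, lsl #32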
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (address_is != target) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
      ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    } else {
      return 2;
    }
  }
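  // A sketch of the adrp/movk shape decoded above (illustrative only):
  //   adrp xN, <page>                  // reaches the page holding the
  //                                    // low 32 bits of dest
  //   movk xN, #(dest >> 32), lsl #32  // overwrite bits 47..32
  // plus, possibly, a third instruction (an add or a load/store) that
  // carries the low 12 bits of offset within the 4k page.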
  virtual int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      narrowKlass nk = (narrowKlass)((uint32_t(Instruction_aarch64::extract(_insn, 20, 5)) << 16)
                                     + uint32_t(Instruction_aarch64::extract(insns[1], 20, 5)));
      target = (address)CompressedKlassPointers::decode(nk);
      return 2;
    }
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
                     + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                     + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
  }
};

address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
  AArch64Decoder decoder(insn_addr, insn);
  address target;
  decoder.run(insn_addr, target);
  return target;
                               JavaThread::frame_anchor_offset()
                               + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}

static inline bool target_needs_far_branch(address addr) {
  if (SCCache::is_on_for_write()) {
    return true;
  }
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}
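
// Illustrative only: a near branch is a single
//   bl <dest>                          // imm26 * 4 gives +/-128MB reach
// while a far branch (as emitted by far_call() below) materializes the
// target first, e.g.
//   adrp tmp, <dest page>
//   add  tmp, tmp, #<page offset>
//   blr  tmp
// Note that while SCCache is recording, the far form is always chosen
// above, so the code still reaches its target if it is later relocated.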

void MacroAssembler::far_call(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  if (target_needs_far_branch(entry.target())) {
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Check whether the entry target is always reachable from any branch.
static bool is_always_within_branch_range(Address entry) {
  if (SCCache::is_on_for_write()) {
    return false;
  }
  const address target = entry.target();

  if (!CodeCache::contains(target)) {
    // We always use trampolines for callees outside CodeCache.
    assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
    return false;
  }

  if (!MacroAssembler::far_branches()) {
    return true;
  }

  if (entry.rspec().type() == relocInfo::runtime_call_type) {
    // Runtime calls are calls of a non-compiled method (stubs, adapters).
    // Non-compiled methods stay forever in CodeCache.
    // We check whether the longest possible branch is within the branch range.
    assert(CodeCache::find_blob(target) != nullptr &&
           !CodeCache::find_blob(target)->is_nmethod(),
           "runtime call of compiled method");
    const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;

#ifdef ASSERT
  {
    STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10);
    Label valid_global_tag;
    tbnz(value, 1, valid_global_tag); // Test for global tag
    stop("non global jobject using resolve_global_jobject");
    bind(valid_global_tag);
  }
#endif

  // Resolve global handle
  access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
  verify_oop(value);

  bind(done);
}

void MacroAssembler::stop(const char* msg) {
  BLOCK_COMMENT(msg);
  // Load msg into r0 (c_rarg0) so we can access it from the signal handler.
  // Using ExternalAddress enables saving and restoring via the code cache.
  lea(c_rarg0, ExternalAddress((address) msg));
  dcps1(0xdeae);
  SCCache::add_C_string(msg);
}

void MacroAssembler::unimplemented(const char* what) {
  const char* buf = nullptr;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("unimplemented: %s", what);
    buf = code_string(ss.as_string());
  }
  stop(buf);
}

void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) {
#ifdef ASSERT
  Label OK;
  br(cc, OK);
  stop(msg);
  bind(OK);
#endif

void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
  if (decrement.is_register()) {
    sub(Rd, Rn, decrement.as_register());
  } else {
    sub(Rd, Rn, decrement.as_constant());
  }
}

void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
  if (decrement.is_register()) {
    subw(Rd, Rn, decrement.as_register());
  } else {
    subw(Rd, Rn, decrement.as_constant());
  }
}

void MacroAssembler::reinit_heapbase()
{
  if (UseCompressedOops) {
    if (Universe::is_fully_initialized() && !SCCache::is_on_for_write()) {
      mov(rheapbase, CompressedOops::base());
    } else {
      lea(rheapbase, ExternalAddress(CompressedOops::base_addr()));
      ldr(rheapbase, Address(rheapbase));
    }
  }
}

// this simulates the behaviour of the x86 cmpxchg instruction using a
// load linked/store conditional pair. we use the acquire/release
// versions of these instructions so that we flush pending writes as
// per Java semantics.
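//
// for illustration (not the exact code emitted below), the LL/SC loop
// has the canonical shape:
//   retry:
//     ldaxr   tmp, [addr]             // load-acquire exclusive
//     cmp     tmp, expected
//     b.ne    done                    // compare failed, tmp holds old value
//     stlxr   status, newv, [addr]    // store-release exclusive
//     cbnz    status, retry           // reservation lost, try again
//   done: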

// n.b. the x86 version assumes the old value to be compared against is
// in rax and updates rax with the value located in memory if the
// cmpxchg fails. we supply a register for the old value explicitly

// the aarch64 load linked/store conditional instructions do not
// accept an offset. so, unlike x86, we must provide a plain register
// to identify the memory word to be compared/exchanged rather than a

// the code cache so that if it is relocated we know it will still reach
  if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
    _adrp(reg1, dest.target());
  } else {
    uint64_t target = (uint64_t)dest.target();
    uint64_t adrp_target
      = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);

    _adrp(reg1, (address)adrp_target);
    movk(reg1, target >> 32, 32);
  }
  byte_offset = (uint64_t)dest.target() & 0xfff;
}

void MacroAssembler::load_byte_map_base(Register reg) {
  CardTable::CardValue* byte_map_base =
    ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();

  // Strictly speaking the byte_map_base isn't an address at all, and it might
  // even be negative. It is thus materialised as a constant.
#if INCLUDE_CDS
  if (SCCache::is_on_for_write()) {
    // SCA needs relocation info for card table base
    lea(reg, ExternalAddress(reinterpret_cast<address>(byte_map_base)));
  } else {
#endif
    mov(reg, (uint64_t)byte_map_base);
#if INCLUDE_CDS
  }
#endif
}
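
// A minimal sketch, for illustration only: a card-table post-barrier then
// dirties a card with something like
//   lsr  tmp, obj, #CardTable::card_shift   // card index for the object
//   strb zr, [byte_map_base_reg, tmp]       // byte_map_base + card index
// so byte_map_base only ever has a card index added to it and is never
// dereferenced on its own.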

void MacroAssembler::load_aotrc_address(Register reg, address a) {
#if INCLUDE_CDS
  assert(AOTRuntimeConstants::contains(a), "address out of range for data area");
  if (SCCache::is_on_for_write()) {
    // all aotrc field addresses should be registered in the SCC address table
    lea(reg, ExternalAddress(a));
  } else {
    mov(reg, (uint64_t)a);
  }
#else
  ShouldNotReachHere();
#endif
}

void MacroAssembler::build_frame(int framesize) {
  assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
  assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
  protect_return_address();
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    sub(sp, sp, framesize);
    stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
  } else {
    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
    if (PreserveFramePointer) mov(rfp, sp);
    if (framesize < ((1 << 12) + 2 * wordSize))
      sub(sp, sp, framesize - 2 * wordSize);
    else {
      mov(rscratch1, framesize - 2 * wordSize);
      sub(sp, sp, rscratch1);
    }
  }