 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"

// ...
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
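  // The patched adrp/add pair materializes an address in two halves; an
  // illustrative sketch of the sequence being patched (not code added here):
  //
  //   adrp x0, target           // bits 63:12 - the 4k page of target
  //   add  x0, x0, #lo12        // bits 11:0  - patched just above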
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
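  // Here the movk overwrites bits 47:32 of the register with the absolute
  // upper half of the target, which frees the preceding adrp from its
  // +/-4GB range limit. The target handed back to the caller keeps only
  // the low 32 bits and substitutes bits 47:32 of insn_addr, so the adrp
  // offset is computed within the current 2^32 window (a reading of the
  // arithmetic above, stated here for clarity).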
  virtual int immediate(address insn_addr, address &target) {
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
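  // The sequence being patched is the 48-bit move-wide form (see movptr()):
  //
  //   movz x, #imm16            // bits 15:0
  //   movk x, #imm16, lsl #16   // bits 31:16
  //   movk x, #imm16, lsl #32   // bits 47:32
  //
  // 48 bits are enough because current AArch64 virtual addresses fit in them.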
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (address_is != target) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }

// ...
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
      ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    } else {
      return 2;
    }
  }
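  // offset_for() is defined elsewhere in this file; from its use here it
  // evidently recognizes a third instruction (an add or load/store that
  // consumes the adrp page) and reports its low-12-bit displacement, which
  // refines the decoded target from the 4k page to the exact byte.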
  virtual int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
                 + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                 + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
  }
};

address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
  AArch64Decoder decoder(insn_addr, insn);
  address target;
  decoder.run(insn_addr, target);
  return target;

// ...
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}
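// If the label is not yet bound we cannot know the pc it will denote, so a
// patch record is added and the current pc is emitted as a placeholder;
// when the label is later bound, the assembler revisits the recorded site
// and rewrites the placeholder with the label's final address.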

static inline bool target_needs_far_branch(address addr) {
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}
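// In summary (restating the three cases above): a +/-128M direct branch
// always reaches when the code cache is at most 128M; beyond 240M it may
// never reach; in between, only targets in the non-nmethod segment are
// guaranteed reachable, so anything outside it needs a far branch.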

void MacroAssembler::far_call(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  if (target_needs_far_branch(entry.target())) {

// ...
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Check whether the entry target is always reachable from any branch.
static bool is_always_within_branch_range(Address entry) {
  const address target = entry.target();

  if (!CodeCache::contains(target)) {
    // We always use trampolines for callees outside CodeCache.
    assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
    return false;
  }

  if (!MacroAssembler::far_branches()) {
    return true;
  }

  if (entry.rspec().type() == relocInfo::runtime_call_type) {
    // Runtime calls are calls of a non-compiled method (stubs, adapters).
    // Non-compiled methods stay forever in CodeCache.
    // We check whether the longest possible branch is within the branch range.
    assert(CodeCache::find_blob(target) != nullptr &&
           !CodeCache::find_blob(target)->is_nmethod(),
           "runtime call of compiled method");
    const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;

// ...
  int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
  assert(offset1 - offset == stackElementSize, "correct arithmetic");
#endif
  if (arg_slot.is_constant()) {
    return Address(esp, arg_slot.as_constant() * stackElementSize
                   + offset);
  } else {
    add(rscratch1, esp, arg_slot.as_register(),
        ext::uxtx, exact_log2(stackElementSize));
    return Address(rscratch1, offset);
  }
}
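// In the register case the slot index is scaled by shifting: the add uses a
// zero-extended (uxtx) register operand shifted by log2(stackElementSize),
// so the effective address is esp + arg_slot * stackElementSize, with the
// byte offset folded into the returned Address.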

void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments,
                                       Label *retaddr) {
  Label E, L;

  stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize)));

  mov(rscratch1, entry_point);
  blr(rscratch1);
  if (retaddr)
    bind(*retaddr);

  ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize)));
}
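// rscratch1 and rmethod are saved and restored around the call because leaf
// runtime routines follow only the C calling convention, which does not
// preserve them; the stp/ldp pair also keeps sp 16-byte aligned as the
// AArch64 ABI requires.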

void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
  assert_different_registers(arg_1, c_rarg0);
  pass_arg0(this, arg_0);
  pass_arg1(this, arg_1);

// ...

#ifdef ASSERT
  {
    STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10);
    Label valid_global_tag;
    tbnz(value, 1, valid_global_tag); // Test for global tag
    stop("non global jobject using resolve_global_jobject");
    bind(valid_global_tag);
  }
#endif

  // Resolve global handle
  access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
  verify_oop(value);

  bind(done);
}

void MacroAssembler::stop(const char* msg) {
  BLOCK_COMMENT(msg);
  dcps1(0xdeae);
  emit_int64((uintptr_t)msg);
}
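// A sketch of the trap protocol as this code suggests it: dcps1 raises a
// debug trap whose immediate (0xdeae) identifies a MacroAssembler::stop,
// and the message pointer is emitted inline immediately after the trapping
// instruction, so the fault handler can recover it relative to the saved pc.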

void MacroAssembler::unimplemented(const char* what) {
  const char* buf = nullptr;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("unimplemented: %s", what);
    buf = code_string(ss.as_string());
  }
  stop(buf);
}

void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) {
#ifdef ASSERT
  Label OK;
  br(cc, OK);
  stop(msg);
  bind(OK);
#endif

// ...

void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
  if (decrement.is_register()) {
    sub(Rd, Rn, decrement.as_register());
  } else {
    sub(Rd, Rn, decrement.as_constant());
  }
}

void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
  if (decrement.is_register()) {
    subw(Rd, Rn, decrement.as_register());
  } else {
    subw(Rd, Rn, decrement.as_constant());
  }
}

void MacroAssembler::reinit_heapbase()
{
  if (UseCompressedOops) {
    if (Universe::is_fully_initialized()) {
      mov(rheapbase, CompressedOops::base());
    } else {
      lea(rheapbase, ExternalAddress(CompressedOops::base_addr()));
      ldr(rheapbase, Address(rheapbase));
    }
  }
}
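// The two paths differ because the compressed-oops base is only stable once
// the heap is fully initialized: after that it can be materialized as an
// immediate, while earlier it has to be reloaded from its variable through
// an ExternalAddress so later changes are observed.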

// This simulates the behaviour of the x86 cmpxchg instruction using a
// load-linked/store-conditional pair. We use the acquire/release
// versions of these instructions so that we flush pending writes as
// per Java semantics.

// N.B. the x86 version assumes the old value to be compared against is
// in rax and updates rax with the value located in memory if the
// cmpxchg fails. We supply a register for the old value explicitly.

// The aarch64 load-linked/store-conditional instructions do not
// accept an offset. So, unlike x86, we must provide a plain register
// to identify the memory word to be compared/exchanged rather than a

// ...
  // the code cache so that if it is relocated we know it will still reach
  if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
    _adrp(reg1, dest.target());
  } else {
    uint64_t target = (uint64_t)dest.target();
    uint64_t adrp_target
      = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);

    _adrp(reg1, (address)adrp_target);
    movk(reg1, target >> 32, 32);
  }
  byte_offset = (uint64_t)dest.target() & 0xfff;
}

void MacroAssembler::load_byte_map_base(Register reg) {
  CardTable::CardValue* byte_map_base =
    ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();

  // Strictly speaking the byte_map_base isn't an address at all, and it might
  // even be negative. It is thus materialised as a constant.
  mov(reg, (uint64_t)byte_map_base);
}
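// A card mark then becomes a single scaled store (an illustrative sketch,
// assuming the usual 512-byte cards, i.e. a card shift of 9):
//
//   lsr  rscratch1, robj, #9       // card index for the object's address
//   strb zr, [reg, rscratch1]      // byte_map_base[addr >> 9] = dirty
//
// This only works because byte_map_base is materialized with no relocation,
// as the comment above explains.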

void MacroAssembler::build_frame(int framesize) {
  assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
  assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
  protect_return_address();
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    sub(sp, sp, framesize);
    stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
  } else {
    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
    if (PreserveFramePointer) mov(rfp, sp);
    if (framesize < ((1 << 12) + 2 * wordSize))
      sub(sp, sp, framesize - 2 * wordSize);
    else {
      mov(rscratch1, framesize - 2 * wordSize);
      sub(sp, sp, rscratch1);
    }
  }
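  // The thresholds track immediate encodings: 1 << 9 is the limit of stp's
  // signed, scaled 7-bit offset, and 1 << 12 the limit of sub's unsigned
  // 12-bit immediate; larger frame sizes go through rscratch1.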
// ...
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "ci/ciUtilities.hpp"
#include "code/compiledIC.hpp"
#if INCLUDE_CDS
#include "code/SCCache.hpp"
#endif
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"

// ...
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  virtual int immediate(address insn_addr, address &target) {
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      narrowKlass nk = CompressedKlassPointers::encode((Klass*)target);
      Instruction_aarch64::patch(insn_addr, 20, 5, nk >> 16);
      Instruction_aarch64::patch(insn_addr+4, 20, 5, nk & 0xffff);
      return 2;
    }
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
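  // The two accepted shapes differ in their first instruction; the opcode
  // fields tested above distinguish them by the movz hw field (hw=1, i.e.
  // lsl #16, marks the narrow form). A sketch of the expected sequences:
  //
  //   narrow klass, 32 bits:  movz x, #hi16, lsl #16 ; movk x, #lo16
  //   wide pointer, 48 bits:  movz x, #lo16 ; movk x, #..., lsl #16 ; movk x, #..., lsl #32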
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (address_is != target) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }

// ...
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
      ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    } else {
      return 2;
    }
  }
  virtual int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      narrowKlass nk = (narrowKlass)((uint32_t(Instruction_aarch64::extract(_insn, 20, 5)) << 16)
                                     + uint32_t(Instruction_aarch64::extract(insns[1], 20, 5)));
      target = (address)CompressedKlassPointers::decode(nk);
      return 2;
    }
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
                 + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                 + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
  }
};

address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
  AArch64Decoder decoder(insn_addr, insn);
  address target;
  decoder.run(insn_addr, target);
  return target;

// ...
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}

static inline bool target_needs_far_branch(address addr) {
  if (SCCache::is_on_for_write()) {
    return true;
  }
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}
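// When code is being written into the startup code cache (SCCache), its
// final load address is not known, so every branch is conservatively
// treated as far: the cached code has to remain correct wherever it is
// later mapped.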

void MacroAssembler::far_call(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  if (target_needs_far_branch(entry.target())) {

// ...
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Check whether the entry target is always reachable from any branch.
static bool is_always_within_branch_range(Address entry) {
  if (SCCache::is_on_for_write()) {
    return false;
  }
  const address target = entry.target();

  if (!CodeCache::contains(target)) {
    // We always use trampolines for callees outside CodeCache.
    assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
    return false;
  }

  if (!MacroAssembler::far_branches()) {
    return true;
  }

  if (entry.rspec().type() == relocInfo::runtime_call_type) {
    // Runtime calls are calls of a non-compiled method (stubs, adapters).
    // Non-compiled methods stay forever in CodeCache.
    // We check whether the longest possible branch is within the branch range.
    assert(CodeCache::find_blob(target) != nullptr &&
           !CodeCache::find_blob(target)->is_nmethod(),
           "runtime call of compiled method");
    const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;

// ...
  int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
  assert(offset1 - offset == stackElementSize, "correct arithmetic");
#endif
  if (arg_slot.is_constant()) {
    return Address(esp, arg_slot.as_constant() * stackElementSize
                   + offset);
  } else {
    add(rscratch1, esp, arg_slot.as_register(),
        ext::uxtx, exact_log2(stackElementSize));
    return Address(rscratch1, offset);
  }
}

void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments,
                                       Label *retaddr) {
  Label E, L;

  stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize)));

  mov(rscratch1, RuntimeAddress(entry_point));
  blr(rscratch1);
  if (retaddr)
    bind(*retaddr);

  ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize)));
}
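// Using RuntimeAddress here (rather than a bare immediate, as in the older
// version above) records a relocation for the entry point, which the
// startup code cache needs in order to fix the address up when the code is
// reused in another run.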

void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
  assert_different_registers(arg_1, c_rarg0);
  pass_arg0(this, arg_0);
  pass_arg1(this, arg_1);

// ...

#ifdef ASSERT
  {
    STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10);
    Label valid_global_tag;
    tbnz(value, 1, valid_global_tag); // Test for global tag
    stop("non global jobject using resolve_global_jobject");
    bind(valid_global_tag);
  }
#endif

  // Resolve global handle
  access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
  verify_oop(value);

  bind(done);
}

void MacroAssembler::stop(const char* msg) {
  BLOCK_COMMENT(msg);
  // Load msg into c_rarg0 (r0) so we can access it from the signal handler.
  // Using an ExternalAddress enables saving and restoring via the code cache.
  lea(c_rarg0, ExternalAddress((address) msg));
  dcps1(0xdeae);
  SCCache::add_C_string(msg);
}
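// Unlike the older inline-pointer scheme above, passing msg through lea and
// an ExternalAddress leaves a relocation record, and add_C_string registers
// the string itself with the startup code cache, so stop() still reports
// the right message when cached code is loaded into a fresh process.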

void MacroAssembler::unimplemented(const char* what) {
  const char* buf = nullptr;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("unimplemented: %s", what);
    buf = code_string(ss.as_string());
  }
  stop(buf);
}

void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) {
#ifdef ASSERT
  Label OK;
  br(cc, OK);
  stop(msg);
  bind(OK);
#endif

// ...

void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
  if (decrement.is_register()) {
    sub(Rd, Rn, decrement.as_register());
  } else {
    sub(Rd, Rn, decrement.as_constant());
  }
}

void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
  if (decrement.is_register()) {
    subw(Rd, Rn, decrement.as_register());
  } else {
    subw(Rd, Rn, decrement.as_constant());
  }
}

void MacroAssembler::reinit_heapbase()
{
  if (UseCompressedOops) {
    if (Universe::is_fully_initialized() && !SCCache::is_on_for_write()) {
      mov(rheapbase, CompressedOops::base());
    } else {
      lea(rheapbase, ExternalAddress(CompressedOops::base_addr()));
      ldr(rheapbase, Address(rheapbase));
    }
  }
}

// This simulates the behaviour of the x86 cmpxchg instruction using a
// load-linked/store-conditional pair. We use the acquire/release
// versions of these instructions so that we flush pending writes as
// per Java semantics.

// N.B. the x86 version assumes the old value to be compared against is
// in rax and updates rax with the value located in memory if the
// cmpxchg fails. We supply a register for the old value explicitly.

// The aarch64 load-linked/store-conditional instructions do not
// accept an offset. So, unlike x86, we must provide a plain register
// to identify the memory word to be compared/exchanged rather than a

// ...
  // the code cache so that if it is relocated we know it will still reach
  if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
    _adrp(reg1, dest.target());
  } else {
    uint64_t target = (uint64_t)dest.target();
    uint64_t adrp_target
      = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);

    _adrp(reg1, (address)adrp_target);
    movk(reg1, target >> 32, 32);
  }
  byte_offset = (uint64_t)dest.target() & 0xfff;
}

void MacroAssembler::load_byte_map_base(Register reg) {
  CardTable::CardValue* byte_map_base =
    ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();

  // Strictly speaking the byte_map_base isn't an address at all, and it might
  // even be negative. It is thus materialised as a constant.
#if INCLUDE_CDS
  if (SCCache::is_on_for_write()) {
    // SCA needs relocation info for card table base
    lea(reg, ExternalAddress(reinterpret_cast<address>(byte_map_base)));
  } else {
#endif
    mov(reg, (uint64_t)byte_map_base);
#if INCLUDE_CDS
  }
#endif
}

void MacroAssembler::load_aotrc_address(Register reg, address a) {
#if INCLUDE_CDS
  assert(AOTRuntimeConstants::contains(a), "address out of range for data area");
  if (SCCache::is_on_for_write()) {
    // all aotrc field addresses should be registered in the SCC address table
    lea(reg, ExternalAddress(a));
  } else {
    mov(reg, (uint64_t)a);
  }
#else
  ShouldNotReachHere();
#endif
}
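// Callers use this rather than a bare mov whenever the constant lives in
// the AOT runtime constants area: when writing the startup cache, the lea
// goes through the SCC address table so the reference survives relocation,
// and outside CDS builds the data area does not exist at all, hence
// ShouldNotReachHere().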

void MacroAssembler::build_frame(int framesize) {
  assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
  assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
  protect_return_address();
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    sub(sp, sp, framesize);
    stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
  } else {
    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
    if (PreserveFramePointer) mov(rfp, sp);
    if (framesize < ((1 << 12) + 2 * wordSize))
      sub(sp, sp, framesize - 2 * wordSize);
    else {
      mov(rscratch1, framesize - 2 * wordSize);
      sub(sp, sp, rscratch1);
    }
  }