 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "code/compiledIC.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_aarch64.inline.hpp"


#ifndef PRODUCT
#define COMMENT(x) do { __ block_comment(x); } while (0)
#else
#define COMMENT(x)
#endif
NEEDS_CLEANUP // remove these definitions?
const Register SYNC_header = r0; // synchronization header
const Register SHIFT_count = r0; // where count for shift operations must be

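// Shorthand used throughout HotSpot assembler files: once '__' is defined as
// '_masm->', every '__ insn(...)' below emits 'insn' through this
// LIR_Assembler's MacroAssembler into the current code buffer.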
#define __ _masm->


// ...
    if (LockingMode == LM_MONITOR) {
      __ b(*stub->entry());
    } else {
      __ unlock_object(r5, r4, r0, r6, *stub->entry());
    }
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ mov(c_rarg0, rthread);
    __ mov_metadata(c_rarg1, method()->constant_encoding());
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(r0, r19);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ block_comment("remove_frame and dispatch to the unwind handler");
  __ remove_frame(initial_frame_size_in_bytes());
  __ far_jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != nullptr) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // generate code for the deopt handler
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  // ...
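  // The handler body is only a trampoline: control is transferred to the
  // shared deoptimization blob, which performs the actual frame unpacking.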
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
  _masm->code_section()->relocate(adr, relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != nullptr) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}

void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0");

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  code_stub->set_safepoint_offset(__ offset());
  __ relocate(relocInfo::poll_return_type);
  __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
  __ ret(lr);
}

int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  guarantee(info != nullptr, "Shouldn't be null");
  __ get_polling_page(rscratch1, relocInfo::poll_type);
  add_debug_info_for_branch(info);  // This isn't just debug info:
                                    // it's the oop map
  __ read_polling_page(rscratch1, relocInfo::poll_type);
  return __ offset();
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg == r31_sp)
    from_reg = sp;
  if (to_reg == r31_sp)
    to_reg = sp;
  __ mov(to_reg, from_reg);
}

void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }

// ...
  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movw(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
      break;
    }

    case T_OBJECT: {
      if (patch_code == lir_patch_none) {
        jobject2reg(c->as_jobject(), dest->as_register());
      } else {
        jobject2reg_with_patching(dest->as_register(), info);
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

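    // Floats have an immediate form only when the bit pattern is encodable
    // in an fmov; otherwise the constant is placed in the constant area and
    // loaded PC-relative through rscratch1.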
    case T_FLOAT: {
      if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
        __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
      } else {
        __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
        __ ldrs(dest->as_float_reg(), Address(rscratch1));
      }
      // ...
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* to_addr = dest->as_address_ptr();

  void (Assembler::* insn)(Register Rt, const Address &adr);

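  // Select a store of the matching width through a member-function pointer;
  // every constant handled below is zero (or null), so a single store of zr
  // via 'insn' after the switch covers all cases.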
  switch (type) {
  case T_ADDRESS:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_LONG:
    assert(c->as_jlong() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_INT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strw;
    break;
  case T_OBJECT:
  case T_ARRAY:
    assert(c->as_jobject() == nullptr, "should be");
    if (UseCompressedOops && !wide) {
      insn = &Assembler::strw;
    } else {
      insn = &Assembler::str;
    }
    break;
  case T_CHAR:
  case T_SHORT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strh;
    break;
  case T_BOOLEAN:
  case T_BYTE:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strb;
    break;
  default:
    ShouldNotReachHere();
    insn = &Assembler::str; // unreachable

// ...
  case T_SHORT:
    __ ldrsh(dest->as_register(), as_Address(from_addr));
    break;

  default:
    ShouldNotReachHere();
  }

  if (is_reference_type(type)) {
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }

    if (!(UseZGC && !ZGenerational)) {
      // Load barrier has not yet been applied, so ZGC can't verify the oop here
      __ verify_oop(dest->as_register());
    }
  }
}


int LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  return exact_log2(elem_size);
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
  case lir_idiv:
  case lir_irem:
    arithmetic_idiv(op->code(),
                    op->in_opr1(),
                    op->in_opr2(),
                    op->in_opr3(),
                    op->result_opr(),
                    op->info());
    break;
  case lir_fmad:
    __ fmaddd(op->result_opr()->as_double_reg(),
              // ...
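    // Allocation must not observe a klass that is still being initialized;
    // ldarb reads init_state with acquire semantics, pairing with the release
    // store performed when class initialization completes.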
    __ lea(rscratch1, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
    __ ldarb(rscratch1, rscratch1);
    __ cmpw(rscratch1, InstanceKlass::fully_initialized);
    add_debug_info_for_null_check_here(op->stub()->info());
    __ br(Assembler::NE, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();
  __ uxtw(len, len);

  if (UseSlowPath ||
      (!UseFastNewObjectArray && is_reference_type(op->type())) ||
      (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
    __ b(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
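    // The shuffle above keeps the temps passed to allocate_array distinct
    // from len: if len aliases a temp, tmp3 substitutes for it; otherwise a
    // spare copy of len is parked in tmp3.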
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      // ...
    assert(data != nullptr, "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  Label* success_target = success;
  Label* failure_target = failure;

  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedClassPointers) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

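  // When profiling, a null obj still records null_seen in the MDO before
  // exiting; without profiling, a null simply branches to obj_is_null.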
  if (should_profile) {
    Register mdo = klass_RInfo;
    __ mov_metadata(mdo, md->constant_encoding());
    Label not_null;
    __ cbnz(obj, not_null);
    // Object is null; update MDO and exit
    Address data_addr
      = __ form_address(rscratch2, mdo,
                        md->byte_offset_of_slot(data, DataLayout::flags_offset()),
                        0);
    __ ldrb(rscratch1, data_addr);
    __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
    __ strb(rscratch1, data_addr);
    __ b(*obj_is_null);
    __ bind(not_null);

    Label update_done;
    Register recv = k_RInfo;
    __ load_klass(recv, obj);
    type_profile_helper(mdo, md, data, recv, &update_done);
    Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
    __ addptr(counter_addr, DataLayout::counter_increment);

    __ bind(update_done);
  } else {
    __ cbz(obj, *obj_is_null);
  }

  if (!k->is_loaded()) {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
    __ mov_metadata(k_RInfo, k->constant_encoding());
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(rscratch1, obj);
    __ cmp(rscratch1, k_RInfo);

    __ br(Assembler::NE, *failure_target);
    // successful cast, fall through to profile or jump
  } else {
    // get object class
    // not a safepoint as obj null check happens earlier
    // ...
    __ bind(success);
    if (dst != obj) {
      __ mov(dst, obj);
    }
  } else if (code == lir_instanceof) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    emit_typecheck_helper(op, &success, &failure, &failure);
    __ bind(failure);
    __ mov(dst, zr);
    __ b(done);
    __ bind(success);
    __ mov(dst, 1);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}

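// Compare-and-swap helpers: cmpxchg is emitted with both acquire and release
// semantics, rscratch1 ends up 0 on success and 1 on failure (NE), and the
// trailing AnyAny barrier keeps the exchange fully ordered with respect to
// surrounding accesses.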
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}

void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}


void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  Register addr;
  if (op->addr()->is_register()) {
    addr = as_reg(op->addr());
  } else {
    assert(op->addr()->is_address(), "what else?");
    LIR_Address* addr_ptr = op->addr()->as_address_ptr();
    // ...
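    // Three-way long compare: dst is preset to -1 for 'less'; if not less,
    // csinc yields 0 when the flags say EQ and 1 otherwise ('greater').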
    __ cmp(left->as_register_lo(), right->as_register_lo());
    __ mov(dst->as_register(), (uint64_t)-1L);
    __ br(Assembler::LT, done);
    __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code code) { }


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  address call = __ trampoline_call(Address(op->addr(), rtype));
  if (call == nullptr) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info());
  __ post_call_nop();
}

2001
2002 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2003 address call = __ ic_call(op->addr());
2004 if (call == nullptr) {
2005 bailout("trampoline stub overflow");
2006 return;
2007 }
2008 add_call_info(code_offset(), op->info());
2009 __ post_call_nop();
2010 }
2011
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == nullptr) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  __ relocate(static_stub_Relocation::spec(call_pc));
  __ emit_static_call_stub();

  assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
         <= call_stub_size(), "stub too big");
  __ end_a_stub();
}

// ...


void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ mov(rscratch1, c);
  __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
}


void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
  ShouldNotReachHere();
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ lea(rscratch1, __ constant_oop_address(o));
  __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
}


// This code replaces a call to arraycopy; no exceptions may be thrown in
// this code, they must be thrown in the System.arraycopy activation frame;
// we could save some checks if this were not the case
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = op->tmp()->as_register();

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (is_reference_type(basic_type)) basic_type = T_OBJECT;

  // if we don't know anything, just go through the generic arraycopy
  if (default_type == nullptr // || basic_type == T_OBJECT
      ) {
    Label done;
    assert(src == r1 && src_pos == r2, "mismatch in calling convention");

    // Save the arguments in case the generic arraycopy fails and we
    // have to fall back to the JNI stub
    __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
    __ stp(length, src_pos, Address(sp, 2*BytesPerWord));
    __ str(src, Address(sp, 4*BytesPerWord));

    address copyfunc_addr = StubRoutines::generic_arraycopy();
    assert(copyfunc_addr != nullptr, "generic arraycopy stub required");

    // The arguments are in the Java calling convention, so we shift them
    // over to the C calling convention
    assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg0, j_rarg0);
    assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
    // ...
    __ cbz(r0, *stub->continuation());

    // Reload values from the stack so they are where the stub
    // expects them.
    __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
    __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
    __ ldr(src, Address(sp, 4*BytesPerWord));

    // r0 is -1 ^ K (i.e. ~K), where K == number of elements already copied;
    // eonw with zr computes bitwise NOT, recovering K in rscratch1
    __ eonw(rscratch1, r0, zr);
    // adjust length down and src/dst pos up by the partially copied count
    __ subw(length, length, rscratch1);
    __ addw(src_pos, src_pos, rscratch1);
    __ addw(dst_pos, dst_pos, rscratch1);
    __ b(*stub->entry());

    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  int scale = exact_log2(elem_size);

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());

  // test for null
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ cbz(src, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ cbz(dst, *stub->entry());
  }

  // If the compiler was not able to prove that exact type of the source or the destination
  // of the arraycopy is an array type, check at runtime if the source or the destination is
  // an instance type.
  // ...
        __ verify_klass_ptr(tmp);
#endif
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        __ ldr(tmp, mdo_addr);
        __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.

        __ orr(tmp, tmp, TypeEntries::type_unknown);
        __ str(tmp, mdo_addr);
        // FIXME: Write barrier needed here?
      }
    }

    __ bind(next);
  }
  COMMENT("} emit_profile_type");
}


void LIR_Assembler::align_backward_branch_target() {
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
  // tmp must be unused
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");

  if (left->is_single_cpu()) {
    assert(dest->is_single_cpu(), "expect single result reg");
    __ negw(dest->as_register(), left->as_register());
  } else if (left->is_double_cpu()) {
    assert(dest->is_double_cpu(), "expect double result reg");
    __ neg(dest->as_register_lo(), left->as_register_lo());
  } else if (left->is_single_fpu()) {
    assert(dest->is_single_fpu(), "expect single float result reg");
    __ fnegs(dest->as_float_reg(), left->as_float_reg());
  } else {
    assert(left->is_double_fpu(), "expect double float operand reg");
    // ...
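// The LIR memory-barrier ops map onto AArch64 dmb barriers through
// MacroAssembler::membar.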
void LIR_Assembler::membar_loadload() {
  __ membar(Assembler::LoadLoad);
}

void LIR_Assembler::membar_storestore() {
  __ membar(MacroAssembler::StoreStore);
}

void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }

void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }

void LIR_Assembler::on_spin_wait() {
  __ spin_wait();
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  __ mov(result_reg->as_register(), rthread);
}


void LIR_Assembler::peephole(LIR_List *lir) {
#if 0
  if (tableswitch_count >= max_tableswitches)
    return;

  /*
    This finite-state automaton recognizes sequences of compare-and-
    branch instructions. We will turn them into a tableswitch. You
    could argue that C1 really shouldn't be doing this sort of
    optimization, but without it the code is really horrible.
  */

  enum { start_s, cmp1_s, beq_s, cmp_s } state;
  int first_key, last_key = -2147483648;
  int next_key = 0;
  int start_insn = -1;
  int last_insn = -1;
  Register reg = noreg;
  LIR_Opr reg_opr;
  // ...


// ============================================================================
// Second listing: the same file, with inline-type (Valhalla) support added.
// ============================================================================

 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInlineKlass.hpp"
#include "ci/ciInstance.hpp"
#include "code/compiledIC.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_aarch64.inline.hpp"


#ifndef PRODUCT
#define COMMENT(x) do { __ block_comment(x); } while (0)
#else
#define COMMENT(x)
#endif

NEEDS_CLEANUP // remove these definitions?
const Register SYNC_header = r0; // synchronization header
const Register SHIFT_count = r0; // where count for shift operations must be

#define __ _masm->


// ...
    if (LockingMode == LM_MONITOR) {
      __ b(*stub->entry());
    } else {
      __ unlock_object(r5, r4, r0, r6, *stub->entry());
    }
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ mov(c_rarg0, rthread);
    __ mov_metadata(c_rarg1, method()->constant_encoding());
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(r0, r19);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ block_comment("remove_frame and dispatch to the unwind handler");
  __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
  __ far_jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != nullptr) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // generate code for the deopt handler
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  // ...
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
  _masm->code_section()->relocate(adr, relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != nullptr) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}

void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0");

  if (InlineTypeReturnedAsFields) {
    // Check if we are returning a non-null inline type and load its fields into registers
    ciType* return_type = compilation()->method()->return_type();
    if (return_type->is_inlinetype()) {
      ciInlineKlass* vk = return_type->as_inline_klass();
      if (vk->can_be_returned_as_fields()) {
        address unpack_handler = vk->unpack_handler();
        assert(unpack_handler != nullptr, "must be");
        __ far_call(RuntimeAddress(unpack_handler));
      }
    } else if (return_type->is_instance_klass() && (!return_type->is_loaded() || StressCallingConvention)) {
      Label skip;
      __ test_oop_is_not_inline_type(r0, rscratch2, skip);

      // Load fields from a buffered value with an inline-class-specific handler
      __ load_klass(rscratch1 /*dst*/, r0 /*src*/);
      __ ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
      __ ldr(rscratch1, Address(rscratch1, InlineKlass::unpack_handler_offset()));
      // The unpack handler can be null if the inline type is not scalarizable in returns
      __ cbz(rscratch1, skip);
      __ blr(rscratch1);

      __ bind(skip);
    }
    // At this point, r0 points to the value object (for an interpreter or C1 caller).
    // The fields of the object are copied into registers (for a C2 caller).
  }

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  code_stub->set_safepoint_offset(__ offset());
  __ relocate(relocInfo::poll_return_type);
  __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
  __ ret(lr);
}

int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
  return (__ store_inline_type_fields_to_buf(vk, false));
}

int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  guarantee(info != nullptr, "Shouldn't be null");
  __ get_polling_page(rscratch1, relocInfo::poll_type);
  add_debug_info_for_branch(info);  // This isn't just debug info:
                                    // it's the oop map
  __ read_polling_page(rscratch1, relocInfo::poll_type);
  return __ offset();
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg == r31_sp)
    from_reg = sp;
  if (to_reg == r31_sp)
    to_reg = sp;
  __ mov(to_reg, from_reg);
}

void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }

// ...
  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movw(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
      break;
    }

    case T_OBJECT: {
      if (patch_code != lir_patch_none) {
        jobject2reg_with_patching(dest->as_register(), info);
      } else {
        jobject2reg(c->as_jobject(), dest->as_register());
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
        __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
      } else {
        __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
        __ ldrs(dest->as_float_reg(), Address(rscratch1));
      }
      // ...
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* to_addr = dest->as_address_ptr();

  void (Assembler::* insn)(Register Rt, const Address &adr);

  switch (type) {
  case T_ADDRESS:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_LONG:
    assert(c->as_jlong() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_INT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strw;
    break;
  case T_OBJECT:
  case T_ARRAY:
    // The non-null case is not handled on aarch64 but is handled on x86
    // FIXME: do we need to add it here?
    assert(c->as_jobject() == nullptr, "should be");
    if (UseCompressedOops && !wide) {
      insn = &Assembler::strw;
    } else {
      insn = &Assembler::str;
    }
    break;
  case T_CHAR:
  case T_SHORT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strh;
    break;
  case T_BOOLEAN:
  case T_BYTE:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strb;
    break;
  default:
    ShouldNotReachHere();
    insn = &Assembler::str; // unreachable

// ...
  case T_SHORT:
    __ ldrsh(dest->as_register(), as_Address(from_addr));
    break;

  default:
    ShouldNotReachHere();
  }

  if (is_reference_type(type)) {
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }

    if (!(UseZGC && !ZGenerational)) {
      // Load barrier has not yet been applied, so ZGC can't verify the oop here
      __ verify_oop(dest->as_register());
    }
  }
}

void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
  assert(dst->is_cpu_register(), "must be");
  assert(dst->type() == src->type(), "must be");

  if (src->is_cpu_register()) {
    reg2reg(src, dst);
  } else if (src->is_stack()) {
    stack2reg(src, dst, dst->type());
  } else if (src->is_constant()) {
    const2reg(src, dst, lir_patch_none, nullptr);
  } else {
    ShouldNotReachHere();
  }
}

int LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  return exact_log2(elem_size);
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
  case lir_idiv:
  case lir_irem:
    arithmetic_idiv(op->code(),
                    op->in_opr1(),
                    op->in_opr2(),
                    op->in_opr3(),
                    op->result_opr(),
                    op->info());
    break;
  case lir_fmad:
    __ fmaddd(op->result_opr()->as_double_reg(),
              // ...
    __ lea(rscratch1, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
    __ ldarb(rscratch1, rscratch1);
    __ cmpw(rscratch1, InstanceKlass::fully_initialized);
    add_debug_info_for_null_check_here(op->stub()->info());
    __ br(Assembler::NE, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();
  __ uxtw(len, len);

  if (UseSlowPath || op->is_null_free() ||
      (!UseFastNewObjectArray && is_reference_type(op->type())) ||
      (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
    __ b(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      // ...
    assert(data != nullptr, "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  Label* success_target = success;
  Label* failure_target = failure;

  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedClassPointers) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  if (op->need_null_check()) {
    if (should_profile) {
      Register mdo = klass_RInfo;
      __ mov_metadata(mdo, md->constant_encoding());
      Label not_null;
      __ cbnz(obj, not_null);
      // Object is null; update MDO and exit
      Address data_addr
        = __ form_address(rscratch2, mdo,
                          md->byte_offset_of_slot(data, DataLayout::flags_offset()),
                          0);
      __ ldrb(rscratch1, data_addr);
      __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
      __ strb(rscratch1, data_addr);
      __ b(*obj_is_null);
      __ bind(not_null);

      Label update_done;
      Register recv = k_RInfo;
      __ load_klass(recv, obj);
      type_profile_helper(mdo, md, data, recv, &update_done);
      Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
      __ addptr(counter_addr, DataLayout::counter_increment);

      __ bind(update_done);
    } else {
      __ cbz(obj, *obj_is_null);
    }
  }

  if (!k->is_loaded()) {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
    __ mov_metadata(k_RInfo, k->constant_encoding());
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(rscratch1, obj);
    __ cmp(rscratch1, k_RInfo);

    __ br(Assembler::NE, *failure_target);
    // successful cast, fall through to profile or jump
  } else {
    // get object class
    // not a safepoint as obj null check happens earlier
    // ...
    __ bind(success);
    if (dst != obj) {
      __ mov(dst, obj);
    }
  } else if (code == lir_instanceof) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    emit_typecheck_helper(op, &success, &failure, &failure);
    __ bind(failure);
    __ mov(dst, zr);
    __ b(done);
    __ bind(success);
    __ mov(dst, 1);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
  // We are loading/storing from/to an array that *may* be a flat array (the
  // declared type is Object[], abstract[], interface[] or VT.ref[]).
  // If this array is a flat array, take the slow path.
  __ test_flat_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
  if (!op->value()->is_illegal()) {
    // The array is not a flat array, but it might be null-free. If we are storing
    // a null into a null-free array, take the slow path (which will throw NPE).
    Label skip;
    __ cbnz(op->value()->as_register(), skip);
    __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
    __ bind(skip);
  }
}

void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
  // We are storing into an array that *may* be null-free (the declared type is
  // Object[], abstract[], interface[] or VT.ref[]).
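  // The null-free bit is kept in the mark word. If the object is unlocked,
  // the mark word can be tested directly; otherwise the original mark is
  // displaced, so fall back to the klass's prototype header. The caller
  // branches on the flags left by the final tst.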
  Label test_mark_word;
  Register tmp = op->tmp()->as_register();
  __ ldr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
  __ tst(tmp, markWord::unlocked_value);
  __ br(Assembler::NE, test_mark_word);
  __ load_prototype_header(tmp, op->array()->as_register());
  __ bind(test_mark_word);
  __ tst(tmp, markWord::null_free_array_bit_in_place);
}

void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
  Label L_oops_equal;
  Label L_oops_not_equal;
  Label L_end;

  Register left = op->left()->as_register();
  Register right = op->right()->as_register();

  __ cmp(left, right);
  __ br(Assembler::EQ, L_oops_equal);

  // (1) Null check -- if one of the operands is null, the other must not be null
  // (because the two references are not equal), so they are not substitutable.
  // FIXME: do the null check only if the operand is nullable
  {
    __ cbz(left, L_oops_not_equal);
    __ cbz(right, L_oops_not_equal);
  }

  ciKlass* left_klass = op->left_klass();
  ciKlass* right_klass = op->right_klass();

  // (2) Inline type check -- if either of the operands is not an inline type,
  // they are not substitutable. We only check this when we are not sure that
  // both operands are inline types. AND-ing both mark words into the pattern
  // preserves the full inline_type_pattern only if every pattern bit is set
  // in both marks.
  if ((left_klass == nullptr || right_klass == nullptr) || // The klass is still unloaded, or came from a Phi node.
      !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
    Register tmp1 = op->tmp1()->as_register();
    __ mov(tmp1, markWord::inline_type_pattern);
    __ ldr(rscratch1, Address(left, oopDesc::mark_offset_in_bytes()));
    __ andr(tmp1, tmp1, rscratch1);
    __ ldr(rscratch1, Address(right, oopDesc::mark_offset_in_bytes()));
    __ andr(tmp1, tmp1, rscratch1);
    __ cmp(tmp1, (u1)markWord::inline_type_pattern);
    __ br(Assembler::NE, L_oops_not_equal);
  }

  // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
  if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) {
    // No need to load klass -- the operands are statically known to be the same inline klass.
    __ b(*op->stub()->entry());
  } else {
    Register left_klass_op = op->left_klass_op()->as_register();
    Register right_klass_op = op->right_klass_op()->as_register();

    if (UseCompressedClassPointers) {
      __ ldrw(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes()));
      __ ldrw(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
      __ cmpw(left_klass_op, right_klass_op);
    } else {
      __ ldr(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes()));
      __ ldr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
      __ cmp(left_klass_op, right_klass_op);
    }

    __ br(Assembler::EQ, *op->stub()->entry()); // same klass -> do slow check
    // fall through to L_oops_not_equal
  }

  __ bind(L_oops_not_equal);
  move(op->not_equal_result(), op->result_opr());
  __ b(L_end);

  __ bind(L_oops_equal);
  move(op->equal_result(), op->result_opr());
  __ b(L_end);

  // We've returned from the stub. r0 contains 0x0 iff the two operands
  // are not substitutable. (Don't compare against 0x1 in case the
  // C compiler is naughty.)
  __ bind(*op->stub()->continuation());
  __ cbz(r0, L_oops_not_equal);               // (call_stub() == 0x0) -> not_equal
  move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
  // fall through
  __ bind(L_end);
}


void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}

void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}


void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  Register addr;
  if (op->addr()->is_register()) {
    addr = as_reg(op->addr());
  } else {
    assert(op->addr()->is_address(), "what else?");
    LIR_Address* addr_ptr = op->addr()->as_address_ptr();
    // ...
    __ cmp(left->as_register_lo(), right->as_register_lo());
    __ mov(dst->as_register(), (uint64_t)-1L);
    __ br(Assembler::LT, done);
    __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code code) { }


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  address call = __ trampoline_call(Address(op->addr(), rtype));
  if (call == nullptr) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
  __ post_call_nop();
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  address call = __ ic_call(op->addr());
  if (call == nullptr) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
  __ post_call_nop();
}

void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == nullptr) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  __ relocate(static_stub_Relocation::spec(call_pc));
  __ emit_static_call_stub();

  assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
         <= call_stub_size(), "stub too big");
  __ end_a_stub();
}

// ...


void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ mov(rscratch1, c);
  __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
}


void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
  ShouldNotReachHere();
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ lea(rscratch1, __ constant_oop_address(o));
  __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
}

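// Slow-path filter used by emit_arraycopy for inline-type arrays: a flat
// source (or a destination that is null-free) cannot be handled by the fast
// path, so either condition branches to the stub.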
void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) {
  if (null_check) {
    __ cbz(obj, *slow_path->entry());
  }
  if (is_dest) {
    __ test_null_free_array_oop(obj, tmp, *slow_path->entry());
  } else {
    __ test_flat_array_oop(obj, tmp, *slow_path->entry());
  }
}

// This code replaces a call to arraycopy; no exceptions may be thrown in
// this code, they must be thrown in the System.arraycopy activation frame;
// we could save some checks if this were not the case
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = op->tmp()->as_register();

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (is_reference_type(basic_type)) basic_type = T_OBJECT;

  if (flags & LIR_OpArrayCopy::always_slow_path) {
    __ b(*stub->entry());
    __ bind(*stub->continuation());
    return;
  }

  // if we don't know anything, just go through the generic arraycopy
  if (default_type == nullptr // || basic_type == T_OBJECT
      ) {
    Label done;
    assert(src == r1 && src_pos == r2, "mismatch in calling convention");

    // Save the arguments in case the generic arraycopy fails and we
    // have to fall back to the JNI stub
    __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
    __ stp(length, src_pos, Address(sp, 2*BytesPerWord));
    __ str(src, Address(sp, 4*BytesPerWord));

    address copyfunc_addr = StubRoutines::generic_arraycopy();
    assert(copyfunc_addr != nullptr, "generic arraycopy stub required");

    // The arguments are in the Java calling convention, so we shift them
    // over to the C calling convention
    assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg0, j_rarg0);
    assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
    // ...
    __ cbz(r0, *stub->continuation());

    // Reload values from the stack so they are where the stub
    // expects them.
    __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
    __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
    __ ldr(src, Address(sp, 4*BytesPerWord));

    // r0 is -1 ^ K (i.e. ~K), where K == number of elements already copied;
    // eonw with zr computes bitwise NOT, recovering K in rscratch1
    __ eonw(rscratch1, r0, zr);
    // adjust length down and src/dst pos up by the partially copied count
    __ subw(length, length, rscratch1);
    __ addw(src_pos, src_pos, rscratch1);
    __ addw(dst_pos, dst_pos, rscratch1);
    __ b(*stub->entry());

    __ bind(*stub->continuation());
    return;
  }

  // Handle inline type arrays
  if (flags & LIR_OpArrayCopy::src_inlinetype_check) {
    arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
  }

  if (flags & LIR_OpArrayCopy::dst_inlinetype_check) {
    arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
  }

  assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  int scale = exact_log2(elem_size);

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());

  // test for null
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ cbz(src, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ cbz(dst, *stub->entry());
  }

  // If the compiler was not able to prove that exact type of the source or the destination
  // of the arraycopy is an array type, check at runtime if the source or the destination is
  // an instance type.
  // ...
        __ verify_klass_ptr(tmp);
#endif
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        __ ldr(tmp, mdo_addr);
        __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.

        __ orr(tmp, tmp, TypeEntries::type_unknown);
        __ str(tmp, mdo_addr);
        // FIXME: Write barrier needed here?
      }
    }

    __ bind(next);
  }
  COMMENT("} emit_profile_type");
}

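// Record in the MDO whether an inline type was observed at this profile
// point; when a null check is required, a null obj skips the update.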
void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  bool not_null = op->not_null();
  int flag = op->flag();

  Label not_inline_type;
  if (!not_null) {
    __ cbz(obj, not_inline_type);
  }

  __ test_oop_is_not_inline_type(obj, tmp, not_inline_type);

  Address mdo_addr = as_Address(op->mdp()->as_address_ptr(), rscratch2);
  __ ldrb(rscratch1, mdo_addr);
  __ orr(rscratch1, rscratch1, flag);
  __ strb(rscratch1, mdo_addr);

  __ bind(not_inline_type);
}

void LIR_Assembler::align_backward_branch_target() {
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
  // tmp must be unused
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");

  if (left->is_single_cpu()) {
    assert(dest->is_single_cpu(), "expect single result reg");
    __ negw(dest->as_register(), left->as_register());
  } else if (left->is_double_cpu()) {
    assert(dest->is_double_cpu(), "expect double result reg");
    __ neg(dest->as_register_lo(), left->as_register_lo());
  } else if (left->is_single_fpu()) {
    assert(dest->is_single_fpu(), "expect single float result reg");
    __ fnegs(dest->as_float_reg(), left->as_float_reg());
  } else {
    assert(left->is_double_fpu(), "expect double float operand reg");
    // ...
void LIR_Assembler::membar_loadload() {
  __ membar(Assembler::LoadLoad);
}

void LIR_Assembler::membar_storestore() {
  __ membar(MacroAssembler::StoreStore);
}

void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }

void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }

void LIR_Assembler::on_spin_wait() {
  __ spin_wait();
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  __ mov(result_reg->as_register(), rthread);
}

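// Sets the condition flags according to whether the saved original PC slot
// is still null. (Assumption: this is used by the stack-repair /
// deoptimization handshake of the scalarized inline-type calling convention.)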
void LIR_Assembler::check_orig_pc() {
  __ ldr(rscratch2, frame_map()->address_for_orig_pc_addr());
  __ cmp(rscratch2, (u1)NULL_WORD);
}

void LIR_Assembler::peephole(LIR_List *lir) {
#if 0
  if (tableswitch_count >= max_tableswitches)
    return;

  /*
    This finite-state automaton recognizes sequences of compare-and-
    branch instructions. We will turn them into a tableswitch. You
    could argue that C1 really shouldn't be doing this sort of
    optimization, but without it the code is really horrible.
  */

  enum { start_s, cmp1_s, beq_s, cmp_s } state;
  int first_key, last_key = -2147483648;
  int next_key = 0;
  int start_insn = -1;
  int last_insn = -1;
  Register reg = noreg;
  LIR_Opr reg_opr;
  // ...