16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "asm/assembler.hpp"
29 #include "c1/c1_CodeStubs.hpp"
30 #include "c1/c1_Compilation.hpp"
31 #include "c1/c1_LIRAssembler.hpp"
32 #include "c1/c1_MacroAssembler.hpp"
33 #include "c1/c1_Runtime1.hpp"
34 #include "c1/c1_ValueStack.hpp"
35 #include "ci/ciArrayKlass.hpp"
36 #include "ci/ciInlineKlass.hpp"
37 #include "ci/ciInstance.hpp"
38 #include "code/compiledIC.hpp"
39 #include "gc/shared/collectedHeap.hpp"
40 #include "gc/shared/gc_globals.hpp"
41 #include "nativeInst_aarch64.hpp"
42 #include "oops/objArrayKlass.hpp"
43 #include "oops/oop.inline.hpp"
44 #include "runtime/frame.inline.hpp"
45 #include "runtime/sharedRuntime.hpp"
46 #include "runtime/stubRoutines.hpp"
47 #include "utilities/powerOfTwo.hpp"
48 #include "vmreg_aarch64.inline.hpp"
49
50
51 #ifndef PRODUCT
52 #define COMMENT(x) do { __ block_comment(x); } while (0)
53 #else
54 #define COMMENT(x)
55 #endif
56
57 NEEDS_CLEANUP // remove these definitions?
58 const Register SYNC_header = r0; // synchronization header
59 const Register SHIFT_count = r0; // where count for shift operations must be
60
61 #define __ _masm->
62
63
416 if (LockingMode == LM_MONITOR) {
417 __ b(*stub->entry());
418 } else {
419 __ unlock_object(r5, r4, r0, r6, *stub->entry());
420 }
421 __ bind(*stub->continuation());
422 }
423
424 if (compilation()->env()->dtrace_method_probes()) {
425 __ mov(c_rarg0, rthread);
426 __ mov_metadata(c_rarg1, method()->constant_encoding());
427 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
428 }
429
430 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
431 __ mov(r0, r19); // Restore the exception
432 }
433
434 // remove the activation and dispatch to the unwind handler
435 __ block_comment("remove_frame and dispatch to the unwind handler");
436 __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
437 __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
438
439 // Emit the slow path assembly
440 if (stub != nullptr) {
441 stub->emit_code(this);
442 }
443
444 return offset;
445 }
446
447
448 int LIR_Assembler::emit_deopt_handler() {
449 // generate code for deopt handler
450 address handler_base = __ start_a_stub(deopt_handler_size());
451 if (handler_base == nullptr) {
452 // not enough space left for the handler
453 bailout("deopt handler overflow");
454 return -1;
455 }
456
460 __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
461 guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
462 __ end_a_stub();
463
464 return offset;
465 }
466
467 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
468 _masm->code_section()->relocate(adr, relocInfo::poll_type);
469 int pc_offset = code_offset();
470 flush_debug_info(pc_offset);
471 info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
472 if (info->exception_handlers() != nullptr) {
473 compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
474 }
475 }
476
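// Emits the method epilogue: for inline-type returns this may first scalarize
// the value into registers via the klass's unpack handler, then it pops the
// frame, performs the return safepoint poll, and returns.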
477 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
478 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0");
479
480 if (InlineTypeReturnedAsFields) {
481 // Check if we are returning a non-null inline type and load its fields into registers
482 ciType* return_type = compilation()->method()->return_type();
483 if (return_type->is_inlinetype()) {
484 ciInlineKlass* vk = return_type->as_inline_klass();
485 if (vk->can_be_returned_as_fields()) {
486 address unpack_handler = vk->unpack_handler();
487 assert(unpack_handler != nullptr, "must be");
488 __ far_call(RuntimeAddress(unpack_handler));
489 }
490 } else if (return_type->is_instance_klass() && (!return_type->is_loaded() || StressCallingConvention)) {
491 Label skip;
492 __ test_oop_is_not_inline_type(r0, rscratch2, skip);
493
494 // Load fields from a buffered value with an inline class specific handler
495 __ load_klass(rscratch1 /*dst*/, r0 /*src*/);
496 __ ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
497 __ ldr(rscratch1, Address(rscratch1, InlineKlass::unpack_handler_offset()));
498 // The unpack handler can be null if the inline type is not scalarizable in returns
499 __ cbz(rscratch1, skip);
500 __ blr(rscratch1);
501
502 __ bind(skip);
503 }
504 // At this point, r0 points to the value object (for interpreter or C1 caller).
505 // The fields of the object are copied into registers (for C2 caller).
506 }
507
508 // Pop the stack before the safepoint code
509 __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
510
511 if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
512 __ reserved_stack_check();
513 }
514
515 code_stub->set_safepoint_offset(__ offset());
516 __ relocate(relocInfo::poll_return_type);
517 __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
518 __ ret(lr);
519 }
520
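// Re-buffers an inline-type return value that was scalarized into registers;
// this simply delegates to the MacroAssembler helper of the same name.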
521 int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
522 return (__ store_inline_type_fields_to_buf(vk, false));
523 }
524
525 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
526 guarantee(info != nullptr, "Shouldn't be null");
527 __ get_polling_page(rscratch1, relocInfo::poll_type);
528 add_debug_info_for_branch(info); // This isn't just debug info:
529 // it's the oop map
530 __ read_polling_page(rscratch1, relocInfo::poll_type);
531 return __ offset();
532 }
533
534
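// LIR can name the stack pointer as r31_sp; rewrite it to the assembler's sp
// register on both sides before emitting the move.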
535 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
536 if (from_reg == r31_sp)
537 from_reg = sp;
538 if (to_reg == r31_sp)
539 to_reg = sp;
540 __ mov(to_reg, from_reg);
541 }
542
543 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
544
551 switch (c->type()) {
552 case T_INT: {
553 assert(patch_code == lir_patch_none, "no patching handled here");
554 __ movw(dest->as_register(), c->as_jint());
555 break;
556 }
557
558 case T_ADDRESS: {
559 assert(patch_code == lir_patch_none, "no patching handled here");
560 __ mov(dest->as_register(), c->as_jint());
561 break;
562 }
563
564 case T_LONG: {
565 assert(patch_code == lir_patch_none, "no patching handled here");
566 __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
567 break;
568 }
569
570 case T_OBJECT: {
571 if (patch_code != lir_patch_none) {
572 jobject2reg_with_patching(dest->as_register(), info);
573 } else {
574 jobject2reg(c->as_jobject(), dest->as_register());
575 }
576 break;
577 }
578
579 case T_METADATA: {
580 if (patch_code != lir_patch_none) {
581 klass2reg_with_patching(dest->as_register(), info);
582 } else {
583 __ mov_metadata(dest->as_register(), c->as_metadata());
584 }
585 break;
586 }
587
588 case T_FLOAT: {
589 if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
590 __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
591 } else {
592 __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
593 __ ldrs(dest->as_float_reg(), Address(rscratch1));
594 }
664 LIR_Const* c = src->as_constant_ptr();
665 LIR_Address* to_addr = dest->as_address_ptr();
666
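// Every case below stores a zero of the appropriate width: the switch picks
// the matching store instruction via a member-function pointer, and the store
// itself is emitted once after the switch with zr as the source register.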
667 void (Assembler::* insn)(Register Rt, const Address &adr);
668
669 switch (type) {
670 case T_ADDRESS:
671 assert(c->as_jint() == 0, "should be");
672 insn = &Assembler::str;
673 break;
674 case T_LONG:
675 assert(c->as_jlong() == 0, "should be");
676 insn = &Assembler::str;
677 break;
678 case T_INT:
679 assert(c->as_jint() == 0, "should be");
680 insn = &Assembler::strw;
681 break;
682 case T_OBJECT:
683 case T_ARRAY:
684 // The non-null case is not handled on aarch64, but it is on x86.
685 // FIXME: do we need to add it here?
686 assert(c->as_jobject() == nullptr, "should be");
687 if (UseCompressedOops && !wide) {
688 insn = &Assembler::strw;
689 } else {
690 insn = &Assembler::str;
691 }
692 break;
693 case T_CHAR:
694 case T_SHORT:
695 assert(c->as_jint() == 0, "should be");
696 insn = &Assembler::strh;
697 break;
698 case T_BOOLEAN:
699 case T_BYTE:
700 assert(c->as_jint() == 0, "should be");
701 insn = &Assembler::strb;
702 break;
703 default:
704 ShouldNotReachHere();
705 insn = &Assembler::str; // unreachable
1016 case T_SHORT:
1017 __ ldrsh(dest->as_register(), as_Address(from_addr));
1018 break;
1019
1020 default:
1021 ShouldNotReachHere();
1022 }
1023
1024 if (is_reference_type(type)) {
1025 if (UseCompressedOops && !wide) {
1026 __ decode_heap_oop(dest->as_register());
1027 }
1028
1029 if (!(UseZGC && !ZGenerational)) {
1030 // Load barrier has not yet been applied, so ZGC can't verify the oop here
1031 __ verify_oop(dest->as_register());
1032 }
1033 }
1034 }
1035
1036 void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
1037 assert(dst->is_cpu_register(), "must be");
1038 assert(dst->type() == src->type(), "must be");
1039
1040 if (src->is_cpu_register()) {
1041 reg2reg(src, dst);
1042 } else if (src->is_stack()) {
1043 stack2reg(src, dst, dst->type());
1044 } else if (src->is_constant()) {
1045 const2reg(src, dst, lir_patch_none, nullptr);
1046 } else {
1047 ShouldNotReachHere();
1048 }
1049 }
1050
1051 int LIR_Assembler::array_element_size(BasicType type) const {
1052 int elem_size = type2aelembytes(type);
1053 return exact_log2(elem_size);
1054 }
1055
1056
1057 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1058 switch (op->code()) {
1059 case lir_idiv:
1060 case lir_irem:
1061 arithmetic_idiv(op->code(),
1062 op->in_opr1(),
1063 op->in_opr2(),
1064 op->in_opr3(),
1065 op->result_opr(),
1066 op->info());
1067 break;
1068 case lir_fmad:
1069 __ fmaddd(op->result_opr()->as_double_reg(),
1221 __ ldrb(rscratch1, Address(op->klass()->as_register(),
1222 InstanceKlass::init_state_offset()));
1223 __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1224 add_debug_info_for_null_check_here(op->stub()->info());
1225 __ br(Assembler::NE, *op->stub()->entry());
1226 }
1227 __ allocate_object(op->obj()->as_register(),
1228 op->tmp1()->as_register(),
1229 op->tmp2()->as_register(),
1230 op->header_size(),
1231 op->object_size(),
1232 op->klass()->as_register(),
1233 *op->stub()->entry());
1234 __ bind(*op->stub()->continuation());
1235 }
1236
1237 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1238 Register len = op->len()->as_register();
1239 __ uxtw(len, len);
1240
1241 if (UseSlowPath || op->is_null_free() ||
1242 (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1243 (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1244 __ b(*op->stub()->entry());
1245 } else {
1246 Register tmp1 = op->tmp1()->as_register();
1247 Register tmp2 = op->tmp2()->as_register();
1248 Register tmp3 = op->tmp3()->as_register();
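// allocate_array must see temps that are distinct from len: if len aliases
// tmp1 or tmp2, substitute tmp3 for that temp; otherwise keep a spare copy of
// len in tmp3.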
1249 if (len == tmp1) {
1250 tmp1 = tmp3;
1251 } else if (len == tmp2) {
1252 tmp2 = tmp3;
1253 } else if (len == tmp3) {
1254 // everything is ok
1255 } else {
1256 __ mov(tmp3, len);
1257 }
1258 __ allocate_array(op->obj()->as_register(),
1259 len,
1260 tmp1,
1261 tmp2,
1326 assert(data != nullptr, "need data for type check");
1327 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1328 }
1329 Label* success_target = success;
1330 Label* failure_target = failure;
1331
1332 if (obj == k_RInfo) {
1333 k_RInfo = dst;
1334 } else if (obj == klass_RInfo) {
1335 klass_RInfo = dst;
1336 }
1337 if (k->is_loaded() && !UseCompressedClassPointers) {
1338 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1339 } else {
1340 Rtmp1 = op->tmp3()->as_register();
1341 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1342 }
1343
1344 assert_different_registers(obj, k_RInfo, klass_RInfo);
1345
1346 if (op->need_null_check()) {
1347 if (should_profile) {
1348 Register mdo = klass_RInfo;
1349 __ mov_metadata(mdo, md->constant_encoding());
1350 Label not_null;
1351 __ cbnz(obj, not_null);
1352 // Object is null; update MDO and exit
1353 Address data_addr
1354 = __ form_address(rscratch2, mdo,
1355 md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1356 0);
1357 __ ldrb(rscratch1, data_addr);
1358 __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1359 __ strb(rscratch1, data_addr);
1360 __ b(*obj_is_null);
1361 __ bind(not_null);
1362
1363 Label update_done;
1364 Register recv = k_RInfo;
1365 __ load_klass(recv, obj);
1366 type_profile_helper(mdo, md, data, recv, &update_done);
1367 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1368 __ addptr(counter_addr, DataLayout::counter_increment);
1369
1370 __ bind(update_done);
1371 } else {
1372 __ cbz(obj, *obj_is_null);
1373 }
1374 }
1375
1376 if (!k->is_loaded()) {
1377 klass2reg_with_patching(k_RInfo, op->info_for_patch());
1378 } else {
1379 __ mov_metadata(k_RInfo, k->constant_encoding());
1380 }
1381 __ verify_oop(obj);
1382
1383 if (op->fast_check()) {
1384 // get object class
1385 // not a safepoint as obj null check happens earlier
1386 __ load_klass(rscratch1, obj);
1387 __ cmp(rscratch1, k_RInfo);
1388
1389 __ br(Assembler::NE, *failure_target);
1390 // successful cast, fall through to profile or jump
1391 } else {
1392 // get object class
1393 // not a safepoint as obj null check happens earlier
1512 __ bind(success);
1513 if (dst != obj) {
1514 __ mov(dst, obj);
1515 }
1516 } else if (code == lir_instanceof) {
1517 Register obj = op->object()->as_register();
1518 Register dst = op->result_opr()->as_register();
1519 Label success, failure, done;
1520 emit_typecheck_helper(op, &success, &failure, &failure);
1521 __ bind(failure);
1522 __ mov(dst, zr);
1523 __ b(done);
1524 __ bind(success);
1525 __ mov(dst, 1);
1526 __ bind(done);
1527 } else {
1528 ShouldNotReachHere();
1529 }
1530 }
1531
1532 void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
1533 // We are loading/storing from/to an array that *may* be a flat array (the
1534 // declared type is Object[], abstract[], interface[] or VT.ref[]).
1535 // If this array is a flat array, take the slow path.
1536 __ test_flat_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1537 if (!op->value()->is_illegal()) {
1538 // The array is not a flat array, but it might be null-free. If we are storing
1539 // a null into a null-free array, take the slow path (which will throw NPE).
1540 Label skip;
1541 __ cbnz(op->value()->as_register(), skip);
1542 __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1543 __ bind(skip);
1544 }
1545 }
1546
1547 void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
1548 // We are storing into an array that *may* be null-free (the declared type is
1549 // Object[], abstract[], interface[] or VT.ref[]).
1550 Label test_mark_word;
1551 Register tmp = op->tmp()->as_register();
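// The null-free property is encoded in the mark word. If the object is locked
// the mark has been displaced, so load the bit pattern from the klass's
// prototype header instead; callers branch on the flags set by the final tst.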
1552 __ ldr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
1553 __ tst(tmp, markWord::unlocked_value);
1554 __ br(Assembler::NE, test_mark_word);
1555 __ load_prototype_header(tmp, op->array()->as_register());
1556 __ bind(test_mark_word);
1557 __ tst(tmp, markWord::null_free_array_bit_in_place);
1558 }
1559
1560 void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
1561 Label L_oops_equal;
1562 Label L_oops_not_equal;
1563 Label L_end;
1564
1565 Register left = op->left()->as_register();
1566 Register right = op->right()->as_register();
1567
1568 __ cmp(left, right);
1569 __ br(Assembler::EQ, L_oops_equal);
1570
1571 // (1) Null check -- if one of the operands is null, the other must not be null (because
1572 // the two references are not equal), so they are not substitutable.
1573 // FIXME: do null check only if the operand is nullable
1574 {
1575 __ cbz(left, L_oops_not_equal);
1576 __ cbz(right, L_oops_not_equal);
1577 }
1578
1579 ciKlass* left_klass = op->left_klass();
1580 ciKlass* right_klass = op->right_klass();
1581
1582 // (2) Inline type check -- if either of the operands is not an inline type,
1583 // they are not substitutable. We do this only if we are not sure that the
1584 // operands are inline types.
1585 if ((left_klass == nullptr || right_klass == nullptr) || // The klass is still unloaded, or came from a Phi node.
1586 !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
1587 Register tmp1 = op->tmp1()->as_register();
1588 __ mov(tmp1, markWord::inline_type_pattern);
1589 __ ldr(rscratch1, Address(left, oopDesc::mark_offset_in_bytes()));
1590 __ andr(tmp1, tmp1, rscratch1);
1591 __ ldr(rscratch1, Address(right, oopDesc::mark_offset_in_bytes()));
1592 __ andr(tmp1, tmp1, rscratch1);
1593 __ cmp(tmp1, (u1)markWord::inline_type_pattern);
1594 __ br(Assembler::NE, L_oops_not_equal);
1595 }
1596
1597 // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
1598 if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) {
1599 // No need to load klass -- the operands are statically known to be the same inline klass.
1600 __ b(*op->stub()->entry());
1601 } else {
1602 Register left_klass_op = op->left_klass_op()->as_register();
1603 Register right_klass_op = op->right_klass_op()->as_register();
1604
1605 if (UseCompressedClassPointers) {
1606 __ ldrw(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes()));
1607 __ ldrw(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
1608 __ cmpw(left_klass_op, right_klass_op);
1609 } else {
1610 __ ldr(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes()));
1611 __ ldr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
1612 __ cmp(left_klass_op, right_klass_op);
1613 }
1614
1615 __ br(Assembler::EQ, *op->stub()->entry()); // same klass -> do slow check
1616 // fall through to L_oops_not_equal
1617 }
1618
1619 __ bind(L_oops_not_equal);
1620 move(op->not_equal_result(), op->result_opr());
1621 __ b(L_end);
1622
1623 __ bind(L_oops_equal);
1624 move(op->equal_result(), op->result_opr());
1625 __ b(L_end);
1626
1627 // We've returned from the stub. R0 contains 0x0 IFF the two
1628 // operands are not substitutable. (Don't compare against 0x1 in case the
1629 // C compiler is naughty)
1630 __ bind(*op->stub()->continuation());
1631 __ cbz(r0, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
1632 move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
1633 // fall-through
1634 __ bind(L_end);
1635 }
1636
1637
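// CAS helpers: MacroAssembler::cmpxchg sets the flags so that EQ means
// success, so rscratch1 ends up 0 on success and 1 on failure. The trailing
// AnyAny barrier gives the operation full fence semantics.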
1638 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1639 __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1640 __ cset(rscratch1, Assembler::NE);
1641 __ membar(__ AnyAny);
1642 }
1643
1644 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1645 __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1646 __ cset(rscratch1, Assembler::NE);
1647 __ membar(__ AnyAny);
1648 }
1649
1650
1651 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1652 Register addr;
1653 if (op->addr()->is_register()) {
1654 addr = as_reg(op->addr());
1655 } else {
1656 assert(op->addr()->is_address(), "what else?");
1657 LIR_Address* addr_ptr = op->addr()->as_address_ptr();
2134 __ cmp(left->as_register_lo(), right->as_register_lo());
2135 __ mov(dst->as_register(), (uint64_t)-1L);
2136 __ br(Assembler::LT, done);
2137 __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
2138 __ bind(done);
2139 } else {
2140 ShouldNotReachHere();
2141 }
2142 }
2143
2144
2145 void LIR_Assembler::align_call(LIR_Code code) { }
2146
2147
2148 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2149 address call = __ trampoline_call(Address(op->addr(), rtype));
2150 if (call == nullptr) {
2151 bailout("trampoline stub overflow");
2152 return;
2153 }
2154 add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
2155 __ post_call_nop();
2156 }
2157
2158
2159 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2160 address call = __ ic_call(op->addr());
2161 if (call == nullptr) {
2162 bailout("trampoline stub overflow");
2163 return;
2164 }
2165 add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
2166 __ post_call_nop();
2167 }
2168
2169 void LIR_Assembler::emit_static_call_stub() {
2170 address call_pc = __ pc();
2171 address stub = __ start_a_stub(call_stub_size());
2172 if (stub == nullptr) {
2173 bailout("static call stub overflow");
2174 return;
2175 }
2176
2177 int start = __ offset();
2178
2179 __ relocate(static_stub_Relocation::spec(call_pc));
2180 __ emit_static_call_stub();
2181
2182 assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
2183 <= call_stub_size(), "stub too big");
2184 __ end_a_stub();
2185 }
2308
2309
2310 void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
2311 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2312 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2313 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2314 __ mov (rscratch1, c);
2315 __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2316 }
2317
2318
2319 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
2320 ShouldNotReachHere();
2321 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2322 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2323 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2324 __ lea(rscratch1, __ constant_oop_address(o));
2325 __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2326 }
2327
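// Sends an arraycopy involving inline-type arrays to the slow path: a flat
// source array cannot be copied by the plain stubs, and a null-free
// destination must reject null elements.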
2328 void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) {
2329 if (null_check) {
2330 __ cbz(obj, *slow_path->entry());
2331 }
2332 if (is_dest) {
2333 __ test_null_free_array_oop(obj, tmp, *slow_path->entry());
2334 } else {
2335 __ test_flat_array_oop(obj, tmp, *slow_path->entry());
2336 }
2337 }
2338
2339 // This code replaces a call to arraycopy; no exceptions may
2340 // be thrown in this code, they must be thrown in the System.arraycopy
2341 // activation frame. We could save some checks if this were not the case.
2342 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2343 ciArrayKlass* default_type = op->expected_type();
2344 Register src = op->src()->as_register();
2345 Register dst = op->dst()->as_register();
2346 Register src_pos = op->src_pos()->as_register();
2347 Register dst_pos = op->dst_pos()->as_register();
2348 Register length = op->length()->as_register();
2349 Register tmp = op->tmp()->as_register();
2350
2351 CodeStub* stub = op->stub();
2352 int flags = op->flags();
2353 BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
2354 if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2355
2356 if (flags & LIR_OpArrayCopy::always_slow_path) {
2357 __ b(*stub->entry());
2358 __ bind(*stub->continuation());
2359 return;
2360 }
2361
2362 // if we don't know anything, just go through the generic arraycopy
2363 if (default_type == nullptr // || basic_type == T_OBJECT
2364 ) {
2365 Label done;
2366 assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2367
2368 // Save the arguments in case the generic arraycopy fails and we
2369 // have to fall back to the JNI stub
2370 __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2371 __ stp(length, src_pos, Address(sp, 2*BytesPerWord));
2372 __ str(src, Address(sp, 4*BytesPerWord));
2373
2374 address copyfunc_addr = StubRoutines::generic_arraycopy();
2375 assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2376
2377 // The arguments are in the Java calling convention, so we shift them
2378 // to the C convention
2379 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2380 __ mov(c_rarg0, j_rarg0);
2381 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2395 __ cbz(r0, *stub->continuation());
2396
2397 // Reload values from the stack so they are where the stub
2398 // expects them.
2399 __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2400 __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
2401 __ ldr(src, Address(sp, 4*BytesPerWord));
2402
2403 // r0 is -1^K where K == partial copied count
2404 __ eonw(rscratch1, r0, zr);
2405 // adjust length down and src/dst pos up by the partial copied count
2406 __ subw(length, length, rscratch1);
2407 __ addw(src_pos, src_pos, rscratch1);
2408 __ addw(dst_pos, dst_pos, rscratch1);
2409 __ b(*stub->entry());
2410
2411 __ bind(*stub->continuation());
2412 return;
2413 }
2414
2415 // Handle inline type arrays
2416 if (flags & LIR_OpArrayCopy::src_inlinetype_check) {
2417 arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
2418 }
2419
2420 if (flags & LIR_OpArrayCopy::dst_inlinetype_check) {
2421 arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
2422 }
2423
2424 assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2425
2426 int elem_size = type2aelembytes(basic_type);
2427 int scale = exact_log2(elem_size);
2428
2429 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2430 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2431 Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
2432 Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
2433
2434 // test for null
2435 if (flags & LIR_OpArrayCopy::src_null_check) {
2436 __ cbz(src, *stub->entry());
2437 }
2438 if (flags & LIR_OpArrayCopy::dst_null_check) {
2439 __ cbz(dst, *stub->entry());
2440 }
2441
2442 // If the compiler was not able to prove that the exact type of the source or the destination
2443 // of the arraycopy is an array type, check at runtime if the source or the destination is
2991 __ verify_klass_ptr(tmp);
2992 #endif
2993 } else {
2994 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2995 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2996
2997 __ ldr(tmp, mdo_addr);
2998 __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2999
3000 __ orr(tmp, tmp, TypeEntries::type_unknown);
3001 __ str(tmp, mdo_addr);
3002 // FIXME: Write barrier needed here?
3003 }
3004 }
3005
3006 __ bind(next);
3007 }
3008 COMMENT("} emit_profile_type");
3009 }
3010
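// Records in the MDO whether the value seen here is a (non-null) inline type:
// the flag byte is OR-ed into the profile; nulls and non-inline values leave
// it untouched.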
3011 void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
3012 Register obj = op->obj()->as_register();
3013 Register tmp = op->tmp()->as_pointer_register();
3014 bool not_null = op->not_null();
3015 int flag = op->flag();
3016
3017 Label not_inline_type;
3018 if (!not_null) {
3019 __ cbz(obj, not_inline_type);
3020 }
3021
3022 __ test_oop_is_not_inline_type(obj, tmp, not_inline_type);
3023
3024 Address mdo_addr = as_Address(op->mdp()->as_address_ptr(), rscratch2);
3025 __ ldrb(rscratch1, mdo_addr);
3026 __ orr(rscratch1, rscratch1, flag);
3027 __ strb(rscratch1, mdo_addr);
3028
3029 __ bind(not_inline_type);
3030 }
3031
3032 void LIR_Assembler::align_backward_branch_target() {
3033 }
3034
3035
3036 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
3037 // tmp must be unused
3038 assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
3039
3040 if (left->is_single_cpu()) {
3041 assert(dest->is_single_cpu(), "expect single result reg");
3042 __ negw(dest->as_register(), left->as_register());
3043 } else if (left->is_double_cpu()) {
3044 assert(dest->is_double_cpu(), "expect double result reg");
3045 __ neg(dest->as_register_lo(), left->as_register_lo());
3046 } else if (left->is_single_fpu()) {
3047 assert(dest->is_single_fpu(), "expect single float result reg");
3048 __ fnegs(dest->as_float_reg(), left->as_float_reg());
3049 } else {
3050 assert(left->is_double_fpu(), "expect double float operand reg");
3151 void LIR_Assembler::membar_loadload() {
3152 __ membar(Assembler::LoadLoad);
3153 }
3154
3155 void LIR_Assembler::membar_storestore() {
3156 __ membar(MacroAssembler::StoreStore);
3157 }
3158
3159 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
3160
3161 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
3162
3163 void LIR_Assembler::on_spin_wait() {
3164 __ spin_wait();
3165 }
3166
3167 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
3168 __ mov(result_reg->as_register(), rthread);
3169 }
3170
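// Compares the saved original PC in the frame against null; callers branch on
// the resulting flags (a non-null saved PC appears to mark a frame that has
// been deoptimized).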
3171 void LIR_Assembler::check_orig_pc() {
3172 __ ldr(rscratch2, frame_map()->address_for_orig_pc_addr());
3173 __ cmp(rscratch2, (u1)NULL_WORD);
3174 }
3175
3176 void LIR_Assembler::peephole(LIR_List *lir) {
3177 #if 0
3178 if (tableswitch_count >= max_tableswitches)
3179 return;
3180
3181 /*
3182 This finite-state automaton recognizes sequences of compare-and-
3183 branch instructions. We will turn them into a tableswitch. You
3184 could argue that C1 really shouldn't be doing this sort of
3185 optimization, but without it the code is really horrible.
3186 */
3187
3188 enum { start_s, cmp1_s, beq_s, cmp_s } state;
3189 int first_key, last_key = -2147483648;
3190 int next_key = 0;
3191 int start_insn = -1;
3192 int last_insn = -1;
3193 Register reg = noreg;
3194 LIR_Opr reg_opr;