 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInlineKlass.hpp"
#include "ci/ciInstance.hpp"
#include "code/compiledIC.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_aarch64.inline.hpp"


#ifndef PRODUCT
#define COMMENT(x)   do { __ block_comment(x); } while (0)
#else
#define COMMENT(x)
#endif

NEEDS_CLEANUP // remove these definitions?
const Register SYNC_header = r0;   // synchronization header
const Register SHIFT_count = r0;   // where count for shift operations must be

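// Note: `__` below is the usual HotSpot shorthand; every `__ foo(...)` call
// expands to `_masm->foo(...)`, so the emitter code reads like an assembly
// listing.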
#define __ _masm->


// ...

    if (LockingMode == LM_MONITOR) {
      __ b(*stub->entry());
    } else {
      __ unlock_object(r5, r4, r0, r6, *stub->entry());
    }
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ mov(c_rarg0, rthread);
    __ mov_metadata(c_rarg1, method()->constant_encoding());
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(r0, r19);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ block_comment("remove_frame and dispatch to the unwind handler");
  __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
  __ far_jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != nullptr) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // generate code for deopt handler
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

// ...

  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
  _masm->code_section()->relocate(adr, relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != nullptr) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}

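// Method return: when inline types may be returned as scalarized fields, the
// buffered value in r0 is unpacked into the return registers a C2 caller
// expects (via the klass-specific unpack handler) before the frame is popped
// and the return safepoint poll is emitted.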
void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0");

  if (InlineTypeReturnedAsFields) {
    // Check if we are returning a non-null inline type and load its fields into registers
    ciType* return_type = compilation()->method()->return_type();
    if (return_type->is_inlinetype()) {
      ciInlineKlass* vk = return_type->as_inline_klass();
      if (vk->can_be_returned_as_fields()) {
        address unpack_handler = vk->unpack_handler();
        assert(unpack_handler != nullptr, "must be");
        __ far_call(RuntimeAddress(unpack_handler));
      }
    } else if (return_type->is_instance_klass() && (!return_type->is_loaded() || StressCallingConvention)) {
      Label skip;
      __ test_oop_is_not_inline_type(r0, rscratch2, skip);

      // Load fields from a buffered value with an inline class specific handler
      __ load_klass(rscratch1 /*dst*/, r0 /*src*/);
      __ ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
      __ ldr(rscratch1, Address(rscratch1, InlineKlass::unpack_handler_offset()));
      // The unpack handler can be null if the inline type is not scalarizable in returns
      __ cbz(rscratch1, skip);
      __ blr(rscratch1);

      __ bind(skip);
    }
    // At this point, r0 points to the value object (for interpreter or C1 caller).
    // The fields of the object are copied into registers (for C2 caller).
  }

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  code_stub->set_safepoint_offset(__ offset());
  __ relocate(relocInfo::poll_return_type);
  __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
  __ ret(lr);
}

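// Buffer a scalarized inline-type return value back into a heap object; this
// just delegates to the MacroAssembler helper of the same name.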
int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
  return (__ store_inline_type_fields_to_buf(vk, false));
}

int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  guarantee(info != nullptr, "Shouldn't be null");
  __ get_polling_page(rscratch1, relocInfo::poll_type);
  add_debug_info_for_branch(info);  // This isn't just debug info:
                                    // it's the oop map
  __ read_polling_page(rscratch1, relocInfo::poll_type);
  return __ offset();
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg == r31_sp)
    from_reg = sp;
  if (to_reg == r31_sp)
    to_reg = sp;
  __ mov(to_reg, from_reg);
}

void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }

// ...

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movw(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
      break;
    }

    case T_OBJECT: {
      if (patch_code != lir_patch_none) {
        jobject2reg_with_patching(dest->as_register(), info);
      } else {
        jobject2reg(c->as_jobject(), dest->as_register());
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
        __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
      } else {
        __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
        __ ldrs(dest->as_float_reg(), Address(rscratch1));
      }
// ...

  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* to_addr = dest->as_address_ptr();

  void (Assembler::* insn)(Register Rt, const Address &adr);

  switch (type) {
    case T_ADDRESS:
      assert(c->as_jint() == 0, "should be");
      insn = &Assembler::str;
      break;
    case T_LONG:
      assert(c->as_jlong() == 0, "should be");
      insn = &Assembler::str;
      break;
    case T_INT:
      assert(c->as_jint() == 0, "should be");
      insn = &Assembler::strw;
      break;
    case T_OBJECT:
    case T_ARRAY:
      // The non-null case is not handled on aarch64 but is handled on x86.
      // FIXME: do we need to add it here?
      assert(c->as_jobject() == nullptr, "should be");
      if (UseCompressedOops && !wide) {
        insn = &Assembler::strw;
      } else {
        insn = &Assembler::str;
      }
      break;
    case T_CHAR:
    case T_SHORT:
      assert(c->as_jint() == 0, "should be");
      insn = &Assembler::strh;
      break;
    case T_BOOLEAN:
    case T_BYTE:
      assert(c->as_jint() == 0, "should be");
      insn = &Assembler::strb;
      break;
    default:
      ShouldNotReachHere();
      insn = &Assembler::str;  // unreachable
// ...

    case T_CHAR:
      __ ldrh(dest->as_register(), as_Address(from_addr));
      break;
    case T_SHORT:
      __ ldrsh(dest->as_register(), as_Address(from_addr));
      break;

    default:
      ShouldNotReachHere();
  }

  if (is_reference_type(type)) {
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }

    __ verify_oop(dest->as_register());
  }
}

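// Materialize one of several kinds of LIR operands into a CPU register; used,
// for example, by the substitutability check below to move the selected
// result operand (register, stack slot or constant) into the result register.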
void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
  assert(dst->is_cpu_register(), "must be");
  assert(dst->type() == src->type(), "must be");

  if (src->is_cpu_register()) {
    reg2reg(src, dst);
  } else if (src->is_stack()) {
    stack2reg(src, dst, dst->type());
  } else if (src->is_constant()) {
    const2reg(src, dst, lir_patch_none, nullptr);
  } else {
    ShouldNotReachHere();
  }
}

int LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  return exact_log2(elem_size);
}

void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      arithmetic_idiv(op->code(),
                      op->in_opr1(),
                      op->in_opr2(),
                      op->in_opr3(),
                      op->result_opr(),
                      op->info());
      break;
    case lir_fmad:
      __ fmaddd(op->result_opr()->as_double_reg(),

// ...

    __ lea(rscratch1, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
    __ ldarb(rscratch1, rscratch1);
    __ cmpw(rscratch1, InstanceKlass::fully_initialized);
    add_debug_info_for_null_check_here(op->stub()->info());
    __ br(Assembler::NE, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

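// Array allocation fast path. The register shuffle below makes sure `len` is
// never also handed to allocate_array as a temp: a temp that aliases `len` is
// retargeted onto tmp3, and otherwise tmp3 receives a copy of the length.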
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();
  __ uxtw(len, len);

  if (UseSlowPath || op->is_null_free() ||
      (!UseFastNewObjectArray && is_reference_type(op->type())) ||
      (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
    __ b(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
// ...

    assert(data != nullptr, "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  Label* success_target = success;
  Label* failure_target = failure;

  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedClassPointers) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  if (op->need_null_check()) {
    if (should_profile) {
      Register mdo = klass_RInfo;
      __ mov_metadata(mdo, md->constant_encoding());
      Label not_null;
      __ cbnz(obj, not_null);
      // Object is null; update MDO and exit
      Address data_addr
        = __ form_address(rscratch2, mdo,
                          md->byte_offset_of_slot(data, DataLayout::flags_offset()),
                          0);
      __ ldrb(rscratch1, data_addr);
      __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
      __ strb(rscratch1, data_addr);
      __ b(*obj_is_null);
      __ bind(not_null);

      Label update_done;
      Register recv = k_RInfo;
      __ load_klass(recv, obj);
      type_profile_helper(mdo, md, data, recv, &update_done);
      Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
      __ addptr(counter_addr, DataLayout::counter_increment);

      __ bind(update_done);
    } else {
      __ cbz(obj, *obj_is_null);
    }
  }

  if (!k->is_loaded()) {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
    __ mov_metadata(k_RInfo, k->constant_encoding());
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(rscratch1, obj);
    __ cmp(rscratch1, k_RInfo);

    __ br(Assembler::NE, *failure_target);
    // successful cast, fall through to profile or jump
  } else {
    // get object class
    // not a safepoint as obj null check happens earlier
// ...

    __ bind(success);
    if (dst != obj) {
      __ mov(dst, obj);
    }
  } else if (code == lir_instanceof) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    emit_typecheck_helper(op, &success, &failure, &failure);
    __ bind(failure);
    __ mov(dst, zr);
    __ b(done);
    __ bind(success);
    __ mov(dst, 1);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
  // We are loading/storing from/to an array that *may* be a flat array (the
  // declared type is Object[], abstract[], interface[] or VT.ref[]).
  // If this array is a flat array, take the slow path.
  __ test_flat_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
  if (!op->value()->is_illegal()) {
    // The array is not a flat array, but it might be null-free. If we are storing
    // a null into a null-free array, take the slow path (which will throw NPE).
    Label skip;
    __ cbnz(op->value()->as_register(), skip);
    __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
    __ bind(skip);
  }
}

void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
  // We are storing into an array that *may* be null-free (the declared type is
  // Object[], abstract[], interface[] or VT.ref[]).
  Label test_mark_word;
  Register tmp = op->tmp()->as_register();
  // If the mark word is not in its unlocked form, read the equivalent bits
  // from the klass prototype header instead.
  __ ldr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
  __ tst(tmp, markWord::unlocked_value);
  __ br(Assembler::NE, test_mark_word);
  __ load_prototype_header(tmp, op->array()->as_register());
  __ bind(test_mark_word);
  __ tst(tmp, markWord::null_free_array_bit_in_place);
}

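// Substitutability check for `acmp` on inline types: two references are
// substitutable if they are the same reference, or if both are inline-type
// instances of the same class with pairwise-substitutable fields. Steps
// (1)-(3) below decide the cheap cases inline; anything else is handed to the
// slow-path stub, which leaves its answer in r0.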
void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
  Label L_oops_equal;
  Label L_oops_not_equal;
  Label L_end;

  Register left = op->left()->as_register();
  Register right = op->right()->as_register();

  __ cmp(left, right);
  __ br(Assembler::EQ, L_oops_equal);

  // (1) Null check -- if one of the operands is null, the other must not be null (because
  // the two references are not equal), so they are not substitutable.
  // FIXME: do null check only if the operand is nullable
  {
    __ cbz(left, L_oops_not_equal);
    __ cbz(right, L_oops_not_equal);
  }

  ciKlass* left_klass = op->left_klass();
  ciKlass* right_klass = op->right_klass();

  // (2) Inline type check -- if either of the operands is not an inline type,
  // they are not substitutable. We do this only if we are not sure that the
  // operands are inline types.
  if ((left_klass == nullptr || right_klass == nullptr) || // The klass is still unloaded, or came from a Phi node.
      !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
    Register tmp1 = op->tmp1()->as_register();
    __ mov(tmp1, markWord::inline_type_pattern);
    __ ldr(rscratch1, Address(left, oopDesc::mark_offset_in_bytes()));
    __ andr(tmp1, tmp1, rscratch1);
    __ ldr(rscratch1, Address(right, oopDesc::mark_offset_in_bytes()));
    __ andr(tmp1, tmp1, rscratch1);
    __ cmp(tmp1, (u1)markWord::inline_type_pattern);
    __ br(Assembler::NE, L_oops_not_equal);
  }

  // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
  if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) {
    // No need to load klass -- the operands are statically known to be the same inline klass.
    __ b(*op->stub()->entry());
  } else {
    Register left_klass_op = op->left_klass_op()->as_register();
    Register right_klass_op = op->right_klass_op()->as_register();

    if (UseCompressedClassPointers) {
      __ ldrw(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes()));
      __ ldrw(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
      __ cmpw(left_klass_op, right_klass_op);
    } else {
      __ ldr(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes()));
      __ ldr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
      __ cmp(left_klass_op, right_klass_op);
    }

    __ br(Assembler::EQ, *op->stub()->entry());  // same klass -> do slow check
    // fall through to L_oops_not_equal
  }

  __ bind(L_oops_not_equal);
  move(op->not_equal_result(), op->result_opr());
  __ b(L_end);

  __ bind(L_oops_equal);
  move(op->equal_result(), op->result_opr());
  __ b(L_end);

  // We've returned from the stub. r0 contains 0x0 IFF the two
  // operands are not substitutable. (Don't compare against 0x1 in case the
  // C compiler is naughty.)
  __ bind(*op->stub()->continuation());
  __ cbz(r0, L_oops_not_equal);                // (call_stub() == 0x0) -> not_equal
  move(op->equal_result(), op->result_opr());  // (call_stub() != 0x0) -> equal
  // fall-through
  __ bind(L_end);
}

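// Compare-and-swap helpers. MacroAssembler::cmpxchg leaves the condition
// flags set (EQ on success), so the `cset` materializes 0 into rscratch1 on
// success and 1 on failure; the trailing AnyAny membar then gives the
// operation full-fence semantics.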
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}

void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}

void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  Register addr;
  if (op->addr()->is_register()) {
    addr = as_reg(op->addr());
  } else {
    assert(op->addr()->is_address(), "what else?");
    LIR_Address* addr_ptr = op->addr()->as_address_ptr();

// ...

    __ cmp(left->as_register_lo(), right->as_register_lo());
    __ mov(dst->as_register(), (uint64_t)-1L);
    __ br(Assembler::LT, done);
    __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code code) { }


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  address call = __ trampoline_call(Address(op->addr(), rtype));
  if (call == nullptr) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
  __ post_call_nop();
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  address call = __ ic_call(op->addr());
  if (call == nullptr) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
  __ post_call_nop();
}

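// Emit the out-of-line stub used by static (direct) calls. The stub is a
// patchable placeholder that the runtime fills in when the call is resolved;
// the size check also reserves room for a trampoline in case the resolved
// target is out of branch range.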
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == nullptr) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  __ relocate(static_stub_Relocation::spec(call_pc));
  __ emit_static_call_stub();

  assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
         <= call_stub_size(), "stub too big");
  __ end_a_stub();
}

// ...

void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ mov(rscratch1, c);
  __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
}


void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
  ShouldNotReachHere();
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ lea(rscratch1, __ constant_oop_address(o));
  __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
}

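// Inline-type arraycopy guard: a flat source array cannot be copied by the
// generic code, and a null-free destination must reject null stores, so
// either condition diverts the copy to the slow-path stub.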
void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) {
  if (null_check) {
    __ cbz(obj, *slow_path->entry());
  }
  if (is_dest) {
    __ test_null_free_array_oop(obj, tmp, *slow_path->entry());
  } else {
    __ test_flat_array_oop(obj, tmp, *slow_path->entry());
  }
}

// This code replaces a call to arraycopy; no exceptions may be thrown in this
// code, they must be thrown in the System.arraycopy activation frame. We
// could save some checks if this were not the case.
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = op->tmp()->as_register();

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (is_reference_type(basic_type)) basic_type = T_OBJECT;

  if (flags & LIR_OpArrayCopy::always_slow_path) {
    __ b(*stub->entry());
    __ bind(*stub->continuation());
    return;
  }

  // if we don't know anything, just go through the generic arraycopy
  if (default_type == nullptr // || basic_type == T_OBJECT
      ) {
    Label done;
    assert(src == r1 && src_pos == r2, "mismatch in calling convention");

    // Save the arguments in case the generic arraycopy fails and we
    // have to fall back to the JNI stub
    __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
    __ stp(length, src_pos, Address(sp, 2*BytesPerWord));
    __ str(src, Address(sp, 4*BytesPerWord));

    address copyfunc_addr = StubRoutines::generic_arraycopy();
    assert(copyfunc_addr != nullptr, "generic arraycopy stub required");

    // The arguments are in the Java calling convention, so we shift them
    // to the C convention
    assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg0, j_rarg0);
    assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
// ...

    __ cbz(r0, *stub->continuation());

    // Reload values from the stack so they are where the stub
    // expects them.
    __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
    __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
    __ ldr(src, Address(sp, 4*BytesPerWord));

    // r0 is -1^K (i.e. ~K) where K == number of elements already copied
    __ eonw(rscratch1, r0, zr);  // rscratch1 = ~r0 = K
    // adjust length down and src/dst pos up by the partially copied count
    __ subw(length, length, rscratch1);
    __ addw(src_pos, src_pos, rscratch1);
    __ addw(dst_pos, dst_pos, rscratch1);
    __ b(*stub->entry());

    __ bind(*stub->continuation());
    return;
  }

  // Handle inline type arrays
  if (flags & LIR_OpArrayCopy::src_inlinetype_check) {
    arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
  }

  if (flags & LIR_OpArrayCopy::dst_inlinetype_check) {
    arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
  }

  assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  int scale = exact_log2(elem_size);

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());

  // test for null
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ cbz(src, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ cbz(dst, *stub->entry());
  }

  // If the compiler was not able to prove that the exact type of the source or the destination
  // of the arraycopy is an array type, check at runtime if the source or the destination is
// ...

      __ verify_klass_ptr(tmp);
#endif
    } else {
      assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
             ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

      __ ldr(tmp, mdo_addr);
      __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next);  // already unknown. Nothing to do anymore.

      __ orr(tmp, tmp, TypeEntries::type_unknown);
      __ str(tmp, mdo_addr);
      // FIXME: Write barrier needed here?
    }
  }

    __ bind(next);
  }
  COMMENT("} emit_profile_type");
}

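// Profile observed inline types: if the (optionally null-checked) value is an
// inline type, OR the given flag bit into the MDO slot addressed by op->mdp().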
void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  bool not_null = op->not_null();
  int flag = op->flag();

  Label not_inline_type;
  if (!not_null) {
    __ cbz(obj, not_inline_type);
  }

  __ test_oop_is_not_inline_type(obj, tmp, not_inline_type);

  Address mdo_addr = as_Address(op->mdp()->as_address_ptr(), rscratch2);
  __ ldrb(rscratch1, mdo_addr);
  __ orr(rscratch1, rscratch1, flag);
  __ strb(rscratch1, mdo_addr);

  __ bind(not_inline_type);
}

void LIR_Assembler::align_backward_branch_target() {
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
  // tmp must be unused
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");

  if (left->is_single_cpu()) {
    assert(dest->is_single_cpu(), "expect single result reg");
    __ negw(dest->as_register(), left->as_register());
  } else if (left->is_double_cpu()) {
    assert(dest->is_double_cpu(), "expect double result reg");
    __ neg(dest->as_register_lo(), left->as_register_lo());
  } else if (left->is_single_fpu()) {
    assert(dest->is_single_fpu(), "expect single float result reg");
    __ fnegs(dest->as_float_reg(), left->as_float_reg());
  } else {
    assert(left->is_double_fpu(), "expect double float operand reg");
// ...

void LIR_Assembler::membar_loadload() {
  __ membar(Assembler::LoadLoad);
}

void LIR_Assembler::membar_storestore() {
  __ membar(MacroAssembler::StoreStore);
}

void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }

void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }

void LIR_Assembler::on_spin_wait() {
  __ spin_wait();
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  __ mov(result_reg->as_register(), rthread);
}

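// Load the saved original PC slot of the current frame and compare it against
// NULL_WORD; callers branch on the resulting flags to tell whether
// deoptimization has stored an original PC there.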
void LIR_Assembler::check_orig_pc() {
  __ ldr(rscratch2, frame_map()->address_for_orig_pc_addr());
  __ cmp(rscratch2, (u1)NULL_WORD);
}

void LIR_Assembler::peephole(LIR_List *lir) {
#if 0
  if (tableswitch_count >= max_tableswitches)
    return;

  /*
    This finite-state automaton recognizes sequences of compare-and-
    branch instructions. We will turn them into a tableswitch. You
    could argue that C1 really shouldn't be doing this sort of
    optimization, but without it the code is really horrible.
  */

  enum { start_s, cmp1_s, beq_s, cmp_s } state;
  int first_key, last_key = -2147483648;
  int next_key = 0;
  int start_insn = -1;
  int last_insn = -1;
  Register reg = noreg;
  LIR_Opr reg_opr;