15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "asm/macroAssembler.inline.hpp"
27 #include "asm/assembler.hpp"
28 #include "c1/c1_CodeStubs.hpp"
29 #include "c1/c1_Compilation.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_MacroAssembler.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "ci/ciArrayKlass.hpp"
35 #include "ci/ciInstance.hpp"
36 #include "code/aotCodeCache.hpp"
37 #include "code/compiledIC.hpp"
38 #include "gc/shared/collectedHeap.hpp"
39 #include "gc/shared/gc_globals.hpp"
40 #include "nativeInst_aarch64.hpp"
41 #include "oops/objArrayKlass.hpp"
42 #include "runtime/frame.inline.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "runtime/stubRoutines.hpp"
45 #include "utilities/powerOfTwo.hpp"
46 #include "vmreg_aarch64.inline.hpp"
47
48
49 #ifndef PRODUCT
50 #define COMMENT(x) do { __ block_comment(x); } while (0)
51 #else
52 #define COMMENT(x)
53 #endif
54
55 NEEDS_CLEANUP // remove these definitions?
56 const Register SYNC_header = r0; // synchronization header
57 const Register SHIFT_count = r0; // where count for shift operations must be
58
59 #define __ _masm->
60
61
410 MonitorExitStub* stub = nullptr;
411 if (method()->is_synchronized()) {
412 monitor_address(0, FrameMap::r0_opr);
413 stub = new MonitorExitStub(FrameMap::r0_opr, 0);
414 __ unlock_object(r5, r4, r0, r6, *stub->entry());
415 __ bind(*stub->continuation());
416 }
417
418 if (compilation()->env()->dtrace_method_probes()) {
419 __ mov(c_rarg0, rthread);
420 __ mov_metadata(c_rarg1, method()->constant_encoding());
421 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
422 }
423
424 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
425 __ mov(r0, r19); // Restore the exception
426 }
427
428 // remove the activation and dispatch to the unwind handler
429 __ block_comment("remove_frame and dispatch to the unwind handler");
430 __ remove_frame(initial_frame_size_in_bytes());
431 __ far_jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_unwind_exception_id)));
432
433 // Emit the slow path assembly
434 if (stub != nullptr) {
435 stub->emit_code(this);
436 }
437
438 return offset;
439 }
440
441
442 int LIR_Assembler::emit_deopt_handler() {
443 // generate code for the deopt handler
444 address handler_base = __ start_a_stub(deopt_handler_size());
445 if (handler_base == nullptr) {
446 // not enough space left for the handler
447 bailout("deopt handler overflow");
448 return -1;
449 }
450
462 assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
463 "out of bounds read in post-call NOP check");
464 __ end_a_stub();
465
466 return entry_offset;
467 }
468
// Record debug info for a branch at address 'adr': tag the instruction with a
// poll-type relocation, flush pending debug info at the current code offset,
// record the info (which carries the oop map) at that PC offset, and register
// any exception handlers for the same PC offset.
469 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
470 _masm->code_section()->relocate(adr, relocInfo::poll_type);
471 int pc_offset = code_offset();
472 flush_debug_info(pc_offset);
473 info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
474 if (info->exception_handlers() != nullptr) {
475 compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
476 }
477 }
478
// Emit the method epilogue: tear down the frame, optionally check the
// reserved stack area, emit the return-poll (recording its offset in the
// safepoint stub), and return. Single-word results must already be in r0.
479 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
480 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
481
482 // Pop the stack before the safepoint code
483 __ remove_frame(initial_frame_size_in_bytes());
484
485 if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
486 __ reserved_stack_check();
487 }
488
// The stub needs the poll's code offset so the relocation/oop map can be
// associated with the poll instruction itself.
489 code_stub->set_safepoint_offset(__ offset());
490 __ relocate(relocInfo::poll_return_type);
491 __ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
492 __ ret(lr);
493 }
494
// Emit a (non-return) safepoint poll: load the polling page address, attach
// the debug info / oop map, then perform the poll read. Returns the code
// offset just past the poll. 'tmp' is unused on aarch64 (rscratch1 is used).
495 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
496 guarantee(info != nullptr, "Shouldn't be null");
497 __ get_polling_page(rscratch1, relocInfo::poll_type);
498 add_debug_info_for_branch(info); // This isn't just debug info:
499 // it's the oop map
500 __ read_polling_page(rscratch1, relocInfo::poll_type);
501 return __ offset();
502 }
503
504
// Register-to-register move. The r31_sp alias is translated to sp on either
// side before emitting the mov, since r31 encodes sp in this context.
505 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
506 if (from_reg == r31_sp)
507 from_reg = sp;
508 if (to_reg == r31_sp)
509 to_reg = sp;
510 __ mov(to_reg, from_reg);
511 }
512
// Not needed on aarch64; deliberately unimplemented.
513 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
514
530 __ mov(dest->as_register(), c->as_jint());
531 break;
532 }
533
534 case T_LONG: {
535 assert(patch_code == lir_patch_none, "no patching handled here");
536 #if INCLUDE_CDS
537 if (AOTCodeCache::is_on_for_dump()) {
538 address b = c->as_pointer();
539 if (AOTRuntimeConstants::contains(b)) {
540 __ load_aotrc_address(dest->as_register_lo(), b);
541 break;
542 }
543 }
544 #endif
545 __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
546 break;
547 }
548
549 case T_OBJECT: {
550 if (patch_code == lir_patch_none) {
551 jobject2reg(c->as_jobject(), dest->as_register());
552 } else {
553 jobject2reg_with_patching(dest->as_register(), info);
554 }
555 break;
556 }
557
558 case T_METADATA: {
559 if (patch_code != lir_patch_none) {
560 klass2reg_with_patching(dest->as_register(), info);
561 } else {
562 __ mov_metadata(dest->as_register(), c->as_metadata());
563 }
564 break;
565 }
566
567 case T_FLOAT: {
568 if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
569 __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
570 } else {
571 __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
572 __ ldrs(dest->as_float_reg(), Address(rscratch1));
573 }
643 LIR_Const* c = src->as_constant_ptr();
644 LIR_Address* to_addr = dest->as_address_ptr();
645
646 void (Assembler::* insn)(Register Rt, const Address &adr);
647
648 switch (type) {
649 case T_ADDRESS:
650 assert(c->as_jint() == 0, "should be");
651 insn = &Assembler::str;
652 break;
653 case T_LONG:
654 assert(c->as_jlong() == 0, "should be");
655 insn = &Assembler::str;
656 break;
657 case T_INT:
658 assert(c->as_jint() == 0, "should be");
659 insn = &Assembler::strw;
660 break;
661 case T_OBJECT:
662 case T_ARRAY:
663 assert(c->as_jobject() == nullptr, "should be");
664 if (UseCompressedOops && !wide) {
665 insn = &Assembler::strw;
666 } else {
667 insn = &Assembler::str;
668 }
669 break;
670 case T_CHAR:
671 case T_SHORT:
672 assert(c->as_jint() == 0, "should be");
673 insn = &Assembler::strh;
674 break;
675 case T_BOOLEAN:
676 case T_BYTE:
677 assert(c->as_jint() == 0, "should be");
678 insn = &Assembler::strb;
679 break;
680 default:
681 ShouldNotReachHere();
682 insn = &Assembler::str; // unreachable
990 case T_CHAR:
991 __ ldrh(dest->as_register(), as_Address(from_addr));
992 break;
993 case T_SHORT:
994 __ ldrsh(dest->as_register(), as_Address(from_addr));
995 break;
996
997 default:
998 ShouldNotReachHere();
999 }
1000
1001 if (is_reference_type(type)) {
1002 if (UseCompressedOops && !wide) {
1003 __ decode_heap_oop(dest->as_register());
1004 }
1005
1006 __ verify_oop(dest->as_register());
1007 }
1008 }
1009
1010
// Return log2 of the array element size in bytes for 'type' (element sizes
// are powers of two, as required by exact_log2), suitable as a shift amount.
1011 int LIR_Assembler::array_element_size(BasicType type) const {
1012 int elem_size = type2aelembytes(type);
1013 return exact_log2(elem_size);
1014 }
1015
1016
1017 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1018 switch (op->code()) {
1019 case lir_idiv:
1020 case lir_irem:
1021 arithmetic_idiv(op->code(),
1022 op->in_opr1(),
1023 op->in_opr2(),
1024 op->in_opr3(),
1025 op->result_opr(),
1026 op->info());
1027 break;
1028 case lir_fmad:
1029 __ fmaddd(op->result_opr()->as_double_reg(),
1181 __ lea(rscratch1, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
1182 __ ldarb(rscratch1, rscratch1);
1183 __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1184 add_debug_info_for_null_check_here(op->stub()->info());
1185 __ br(Assembler::NE, *op->stub()->entry());
1186 }
1187 __ allocate_object(op->obj()->as_register(),
1188 op->tmp1()->as_register(),
1189 op->tmp2()->as_register(),
1190 op->header_size(),
1191 op->object_size(),
1192 op->klass()->as_register(),
1193 *op->stub()->entry());
1194 __ bind(*op->stub()->continuation());
1195 }
1196
1197 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1198 Register len = op->len()->as_register();
1199 __ uxtw(len, len);
1200
1201 if (UseSlowPath ||
1202 (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1203 (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1204 __ b(*op->stub()->entry());
1205 } else {
1206 Register tmp1 = op->tmp1()->as_register();
1207 Register tmp2 = op->tmp2()->as_register();
1208 Register tmp3 = op->tmp3()->as_register();
1209 if (len == tmp1) {
1210 tmp1 = tmp3;
1211 } else if (len == tmp2) {
1212 tmp2 = tmp3;
1213 } else if (len == tmp3) {
1214 // everything is ok
1215 } else {
1216 __ mov(tmp3, len);
1217 }
1218 __ allocate_array(op->obj()->as_register(),
1219 len,
1220 tmp1,
1221 tmp2,
1261 assert(data != nullptr, "need data for type check");
1262 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1263 }
1264 Label* success_target = success;
1265 Label* failure_target = failure;
1266
1267 if (obj == k_RInfo) {
1268 k_RInfo = dst;
1269 } else if (obj == klass_RInfo) {
1270 klass_RInfo = dst;
1271 }
1272 if (k->is_loaded() && !UseCompressedClassPointers) {
1273 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1274 } else {
1275 Rtmp1 = op->tmp3()->as_register();
1276 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1277 }
1278
1279 assert_different_registers(obj, k_RInfo, klass_RInfo);
1280
1281 if (should_profile) {
1282 Register mdo = klass_RInfo;
1283 __ mov_metadata(mdo, md->constant_encoding());
1284 Label not_null;
1285 __ cbnz(obj, not_null);
1286 // Object is null; update MDO and exit
1287 Address data_addr
1288 = __ form_address(rscratch2, mdo,
1289 md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1290 0);
1291 __ ldrb(rscratch1, data_addr);
1292 __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1293 __ strb(rscratch1, data_addr);
1294 __ b(*obj_is_null);
1295 __ bind(not_null);
1296
1297 Register recv = k_RInfo;
1298 __ load_klass(recv, obj);
1299 type_profile_helper(mdo, md, data, recv);
1300 } else {
1301 __ cbz(obj, *obj_is_null);
1302 }
1303
1304 if (!k->is_loaded()) {
1305 klass2reg_with_patching(k_RInfo, op->info_for_patch());
1306 } else {
1307 __ mov_metadata(k_RInfo, k->constant_encoding());
1308 }
1309 __ verify_oop(obj);
1310
1311 if (op->fast_check()) {
1312 // get object class
1313 // not a safepoint as obj null check happens earlier
1314 __ load_klass(rscratch1, obj);
1315 __ cmp( rscratch1, k_RInfo);
1316
1317 __ br(Assembler::NE, *failure_target);
1318 // successful cast, fall through to profile or jump
1319 } else {
1320 // get object class
1321 // not a safepoint as obj null check happens earlier
1322 __ load_klass(klass_RInfo, obj);
1323 if (k->is_loaded()) {
1324 // See if we get an immediate positive hit
1325 __ ldr(rscratch1, Address(klass_RInfo, int64_t(k->super_check_offset())));
1326 __ cmp(k_RInfo, rscratch1);
1327 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1328 __ br(Assembler::NE, *failure_target);
1329 // successful cast, fall through to profile or jump
1330 } else {
1331 // See if we get an immediate positive hit
1332 __ br(Assembler::EQ, *success_target);
1333 // check for self
1334 __ cmp(klass_RInfo, k_RInfo);
1335 __ br(Assembler::EQ, *success_target);
1336
1337 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1338 __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1339 __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1340 // result is a boolean
1341 __ cbzw(klass_RInfo, *failure_target);
1342 // successful cast, fall through to profile or jump
1343 }
1344 } else {
1345 // perform the fast part of the checking logic
1346 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
1347 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1348 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1349 __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1350 __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1351 // result is a boolean
1352 __ cbz(k_RInfo, *failure_target);
1353 // successful cast, fall through to profile or jump
1354 }
1435 __ bind(success);
1436 if (dst != obj) {
1437 __ mov(dst, obj);
1438 }
1439 } else if (code == lir_instanceof) {
1440 Register obj = op->object()->as_register();
1441 Register dst = op->result_opr()->as_register();
1442 Label success, failure, done;
1443 emit_typecheck_helper(op, &success, &failure, &failure);
1444 __ bind(failure);
1445 __ mov(dst, zr);
1446 __ b(done);
1447 __ bind(success);
1448 __ mov(dst, 1);
1449 __ bind(done);
1450 } else {
1451 ShouldNotReachHere();
1452 }
1453 }
1454
// 32-bit compare-and-swap at [addr] with acquire+release semantics (strong,
// non-weak). Afterwards rscratch1 is set from the NE condition — presumably
// 1 on failure, 0 on success — and a full (AnyAny) barrier is emitted.
1455 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1456 __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1457 __ cset(rscratch1, Assembler::NE);
1458 __ membar(__ AnyAny);
1459 }
1460
// 64-bit (xword) variant of casw: strong CAS with acquire+release, NE result
// flag materialized into rscratch1, followed by a full (AnyAny) barrier.
1461 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1462 __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1463 __ cset(rscratch1, Assembler::NE);
1464 __ membar(__ AnyAny);
1465 }
1466
1467
1468 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1469 Register addr;
1470 if (op->addr()->is_register()) {
1471 addr = as_reg(op->addr());
1472 } else {
1473 assert(op->addr()->is_address(), "what else?");
1474 LIR_Address* addr_ptr = op->addr()->as_address_ptr();
1948 __ cmp(left->as_register_lo(), right->as_register_lo());
1949 __ mov(dst->as_register(), (uint64_t)-1L);
1950 __ br(Assembler::LT, done);
1951 __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
1952 __ bind(done);
1953 } else {
1954 ShouldNotReachHere();
1955 }
1956 }
1957
1958
// No call-site alignment is required on aarch64, so this is a no-op.
1959 void LIR_Assembler::align_call(LIR_Code code) { }
1960
1961
// Emit a direct Java call through a trampoline. Bails out the compilation if
// the trampoline stub cannot be allocated; otherwise records call info at the
// return address and emits the post-call nop (used for call-site inspection).
1962 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
1963 address call = __ trampoline_call(Address(op->addr(), rtype));
1964 if (call == nullptr) {
1965 bailout("trampoline stub overflow");
1966 return;
1967 }
1968 add_call_info(code_offset(), op->info());
1969 __ post_call_nop();
1970 }
1971
1972
// Emit an inline-cache (virtual) Java call. Mirrors call(): bail out when the
// trampoline stub cannot be allocated, else record call info and emit the
// post-call nop.
1973 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
1974 address call = __ ic_call(op->addr());
1975 if (call == nullptr) {
1976 bailout("trampoline stub overflow");
1977 return;
1978 }
1979 add_call_info(code_offset(), op->info());
1980 __ post_call_nop();
1981 }
1982
// Emit the out-of-line static call stub for the call site at the current pc.
// The stub is allocated in the stubs section, tied back to the call site via
// a static_stub_Relocation, and must fit within call_stub_size() including
// room for the associated trampoline stub.
1983 void LIR_Assembler::emit_static_call_stub() {
1984 address call_pc = __ pc();
1985 address stub = __ start_a_stub(call_stub_size());
1986 if (stub == nullptr) {
1987 bailout("static call stub overflow");
1988 return;
1989 }
1990
1991 int start = __ offset();
1992
1993 __ relocate(static_stub_Relocation::spec(call_pc));
1994 __ emit_static_call_stub();
1995
1996 assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
1997 <= call_stub_size(), "stub too big");
1998 __ end_a_stub();
1999 }
2122
2123
// Store an immediate jint into the reserved outgoing-argument area on the
// stack, at the given word offset from sp. Used to pass parameters to
// runtime stubs; rscratch1 is clobbered as the staging register.
2124 void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
2125 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2126 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2127 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2128 __ mov (rscratch1, c);
2129 __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2130 }
2131
2132
// jobject variant of store_parameter. Guarded by ShouldNotReachHere() — this
// path is not expected to be taken on aarch64; the code below it is kept for
// reference/debug builds only.
2133 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
2134 ShouldNotReachHere();
2135 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2136 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2137 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2138 __ lea(rscratch1, __ constant_oop_address(o));
2139 __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2140 }
2141
2142
2143 // This code replaces a call to arraycopy; no exception may
2144 // be thrown in this code, they must be thrown in the System.arraycopy
2145 // activation frame; we could save some checks if this would not be the case
2146 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2147 ciArrayKlass* default_type = op->expected_type();
2148 Register src = op->src()->as_register();
2149 Register dst = op->dst()->as_register();
2150 Register src_pos = op->src_pos()->as_register();
2151 Register dst_pos = op->dst_pos()->as_register();
2152 Register length = op->length()->as_register();
2153 Register tmp = op->tmp()->as_register();
2154
2155 CodeStub* stub = op->stub();
2156 int flags = op->flags();
2157 BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
2158 if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2159
2160 // if we don't know anything, just go through the generic arraycopy
2161 if (default_type == nullptr // || basic_type == T_OBJECT
2162 ) {
2163 Label done;
2164 assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2165
2166 // Save the arguments in case the generic arraycopy fails and we
2167 // have to fall back to the JNI stub
2168 __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2169 __ stp(length, src_pos, Address(sp, 2*BytesPerWord));
2170 __ str(src, Address(sp, 4*BytesPerWord));
2171
2172 address copyfunc_addr = StubRoutines::generic_arraycopy();
2173 assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2174
2175 // The arguments are in java calling convention so we shift them
2176 // to C convention
2177 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2178 __ mov(c_rarg0, j_rarg0);
2179 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2193 __ cbz(r0, *stub->continuation());
2194
2195 // Reload values from the stack so they are where the stub
2196 // expects them.
2197 __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2198 __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
2199 __ ldr(src, Address(sp, 4*BytesPerWord));
2200
2201 // r0 is -1^K where K == partial copied count
2202 __ eonw(rscratch1, r0, zr);
2203 // adjust length down and src/end pos up by partial copied count
2204 __ subw(length, length, rscratch1);
2205 __ addw(src_pos, src_pos, rscratch1);
2206 __ addw(dst_pos, dst_pos, rscratch1);
2207 __ b(*stub->entry());
2208
2209 __ bind(*stub->continuation());
2210 return;
2211 }
2212
2213 assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2214
2215 int elem_size = type2aelembytes(basic_type);
2216 int scale = exact_log2(elem_size);
2217
2218 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2219 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2220
2221 // test for null
2222 if (flags & LIR_OpArrayCopy::src_null_check) {
2223 __ cbz(src, *stub->entry());
2224 }
2225 if (flags & LIR_OpArrayCopy::dst_null_check) {
2226 __ cbz(dst, *stub->entry());
2227 }
2228
2229 // If the compiler was not able to prove that exact type of the source or the destination
2230 // of the arraycopy is an array type, check at runtime if the source or the destination is
2231 // an instance type.
2232 if (flags & LIR_OpArrayCopy::type_check) {
2707 __ verify_klass_ptr(tmp);
2708 #endif
2709 } else {
2710 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2711 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2712
2713 __ ldr(tmp, mdo_addr);
2714 __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2715
2716 __ orr(tmp, tmp, TypeEntries::type_unknown);
2717 __ str(tmp, mdo_addr);
2718 // FIXME: Write barrier needed here?
2719 }
2720 }
2721
2722 __ bind(next);
2723 }
2724 COMMENT("} emit_profile_type");
2725 }
2726
2727
// No backward-branch-target alignment is performed on aarch64.
2728 void LIR_Assembler::align_backward_branch_target() {
2729 }
2730
2731
2732 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2733 // tmp must be unused
2734 assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2735
2736 if (left->is_single_cpu()) {
2737 assert(dest->is_single_cpu(), "expect single result reg");
2738 __ negw(dest->as_register(), left->as_register());
2739 } else if (left->is_double_cpu()) {
2740 assert(dest->is_double_cpu(), "expect double result reg");
2741 __ neg(dest->as_register_lo(), left->as_register_lo());
2742 } else if (left->is_single_fpu()) {
2743 assert(dest->is_single_fpu(), "expect single float result reg");
2744 __ fnegs(dest->as_float_reg(), left->as_float_reg());
2745 } else {
2746 assert(left->is_double_fpu(), "expect double float operand reg");
// Emit a load-load memory barrier.
// Consistency fix: the sibling membar_* emitters below all qualify the
// barrier constant with MacroAssembler::; this one used Assembler:: — the
// same constant (MacroAssembler derives from Assembler), but the scoping is
// now uniform across the four membar helpers.
2846 void LIR_Assembler::membar_loadload() {
2847 __ membar(MacroAssembler::LoadLoad);
2848 }
2849
// Emit a store-store memory barrier.
2850 void LIR_Assembler::membar_storestore() {
2851 __ membar(MacroAssembler::StoreStore);
2852 }
2853
// Emit a load-store memory barrier.
2854 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
2855
// Emit a store-load memory barrier.
2856 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
2857
// Emit the architecture's spin-wait hint (Thread.onSpinWait intrinsic).
2858 void LIR_Assembler::on_spin_wait() {
2859 __ spin_wait();
2860 }
2861
// Copy the current-thread register (rthread) into the result operand.
2862 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
2863 __ mov(result_reg->as_register(), rthread);
2864 }
2865
2866
2867 void LIR_Assembler::peephole(LIR_List *lir) {
2868 #if 0
2869 if (tableswitch_count >= max_tableswitches)
2870 return;
2871
2872 /*
2873 This finite-state automaton recognizes sequences of compare-and-
2874 branch instructions. We will turn them into a tableswitch. You
2875 could argue that C1 really shouldn't be doing this sort of
2876 optimization, but without it the code is really horrible.
2877 */
2878
2879 enum { start_s, cmp1_s, beq_s, cmp_s } state;
2880 int first_key, last_key = -2147483648;
2881 int next_key = 0;
2882 int start_insn = -1;
2883 int last_insn = -1;
2884 Register reg = noreg;
2885 LIR_Opr reg_opr;
|
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "asm/macroAssembler.inline.hpp"
27 #include "asm/assembler.hpp"
28 #include "c1/c1_CodeStubs.hpp"
29 #include "c1/c1_Compilation.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_MacroAssembler.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "ci/ciArrayKlass.hpp"
35 #include "ci/ciInlineKlass.hpp"
36 #include "ci/ciInstance.hpp"
37 #include "ci/ciObjArrayKlass.hpp"
38 #include "code/aotCodeCache.hpp"
39 #include "code/compiledIC.hpp"
40 #include "gc/shared/collectedHeap.hpp"
41 #include "gc/shared/gc_globals.hpp"
42 #include "nativeInst_aarch64.hpp"
43 #include "oops/objArrayKlass.hpp"
44 #include "oops/oop.inline.hpp"
45 #include "runtime/frame.inline.hpp"
46 #include "runtime/sharedRuntime.hpp"
47 #include "runtime/stubRoutines.hpp"
48 #include "utilities/powerOfTwo.hpp"
49 #include "vmreg_aarch64.inline.hpp"
50
51
52 #ifndef PRODUCT
53 #define COMMENT(x) do { __ block_comment(x); } while (0)
54 #else
55 #define COMMENT(x)
56 #endif
57
58 NEEDS_CLEANUP // remove these definitions?
59 const Register SYNC_header = r0; // synchronization header
60 const Register SHIFT_count = r0; // where count for shift operations must be
61
62 #define __ _masm->
63
64
413 MonitorExitStub* stub = nullptr;
414 if (method()->is_synchronized()) {
415 monitor_address(0, FrameMap::r0_opr);
416 stub = new MonitorExitStub(FrameMap::r0_opr, 0);
417 __ unlock_object(r5, r4, r0, r6, *stub->entry());
418 __ bind(*stub->continuation());
419 }
420
421 if (compilation()->env()->dtrace_method_probes()) {
422 __ mov(c_rarg0, rthread);
423 __ mov_metadata(c_rarg1, method()->constant_encoding());
424 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
425 }
426
427 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
428 __ mov(r0, r19); // Restore the exception
429 }
430
431 // remove the activation and dispatch to the unwind handler
432 __ block_comment("remove_frame and dispatch to the unwind handler");
433 __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
434 __ far_jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_unwind_exception_id)));
435
436 // Emit the slow path assembly
437 if (stub != nullptr) {
438 stub->emit_code(this);
439 }
440
441 return offset;
442 }
443
444
445 int LIR_Assembler::emit_deopt_handler() {
446 // generate code for the deopt handler
447 address handler_base = __ start_a_stub(deopt_handler_size());
448 if (handler_base == nullptr) {
449 // not enough space left for the handler
450 bailout("deopt handler overflow");
451 return -1;
452 }
453
465 assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
466 "out of bounds read in post-call NOP check");
467 __ end_a_stub();
468
469 return entry_offset;
470 }
471
// Record debug info for a branch at address 'adr': tag the instruction with a
// poll-type relocation, flush pending debug info at the current code offset,
// record the info (which carries the oop map) at that PC offset, and register
// any exception handlers for the same PC offset.
472 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
473 _masm->code_section()->relocate(adr, relocInfo::poll_type);
474 int pc_offset = code_offset();
475 flush_debug_info(pc_offset);
476 info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
477 if (info->exception_handlers() != nullptr) {
478 compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
479 }
480 }
481
// Emit the method epilogue. When InlineTypeReturnedAsFields is enabled and
// the method may return an inline type, first scalarize the returned value
// into registers via its unpack handler; then tear down the frame, optionally
// check the reserved stack area, emit the return poll, and return. Single-word
// results must already be in r0.
482 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
483 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
484
485 if (InlineTypeReturnedAsFields) {
486 // Check if we are returning a non-null inline type and load its fields into registers
487 ciType* return_type = compilation()->method()->return_type();
488 if (return_type->is_inlinetype()) {
// Statically-known inline type: call its unpack handler directly.
489 ciInlineKlass* vk = return_type->as_inline_klass();
490 if (vk->can_be_returned_as_fields()) {
491 address unpack_handler = vk->unpack_handler();
492 assert(unpack_handler != nullptr, "must be");
493 __ far_call(RuntimeAddress(unpack_handler));
494 }
495 } else if (return_type->is_instance_klass() && (!return_type->is_loaded() || StressCallingConvention)) {
// Return type not statically known to be an inline type: decide at runtime.
496 Label skip;
497 Label not_null;
498 __ cbnz(r0, not_null);
499 // Returned value is null, zero all return registers because they may belong to oop fields
500 __ mov(j_rarg1, zr);
501 __ mov(j_rarg2, zr);
502 __ mov(j_rarg3, zr);
503 __ mov(j_rarg4, zr);
504 __ mov(j_rarg5, zr);
505 __ mov(j_rarg6, zr);
506 __ mov(j_rarg7, zr);
507 __ b(skip);
508 __ bind(not_null);
509
510 // Check if we are returning a non-null inline type and load its fields into registers
511 __ test_oop_is_not_inline_type(r0, rscratch2, skip, /* can_be_null= */ false);
512
513 // Load fields from a buffered value with an inline class specific handler
514 __ load_klass(rscratch1 /*dst*/, r0 /*src*/);
515 __ ldr(rscratch1, Address(rscratch1, InlineKlass::adr_members_offset()));
516 __ ldr(rscratch1, Address(rscratch1, InlineKlass::unpack_handler_offset()));
517 // Unpack handler can be null if inline type is not scalarizable in returns
518 __ cbz(rscratch1, skip);
519 __ blr(rscratch1);
520
521 __ bind(skip);
522 }
523 // At this point, r0 points to the value object (for interpreter or C1 caller).
524 // The fields of the object are copied into registers (for C2 caller).
525 }
526
527 // Pop the stack before the safepoint code
528 __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
529
530 if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
531 __ reserved_stack_check();
532 }
533
// The stub needs the poll's code offset so the relocation/oop map can be
// associated with the poll instruction itself.
534 code_stub->set_safepoint_offset(__ offset());
535 __ relocate(relocInfo::poll_return_type);
536 __ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
537 __ ret(lr);
538 }
539
// Thin wrapper: delegate to the MacroAssembler helper of the same name
// (with from_interpreter/alloc flag false — TODO confirm the flag's meaning
// against the MacroAssembler declaration).
540 int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
541 return (__ store_inline_type_fields_to_buf(vk, false));
542 }
543
// Emit a (non-return) safepoint poll: load the polling page address, attach
// the debug info / oop map, then perform the poll read. Returns the code
// offset just past the poll. 'tmp' is unused on aarch64 (rscratch1 is used).
544 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
545 guarantee(info != nullptr, "Shouldn't be null");
546 __ get_polling_page(rscratch1, relocInfo::poll_type);
547 add_debug_info_for_branch(info); // This isn't just debug info:
548 // it's the oop map
549 __ read_polling_page(rscratch1, relocInfo::poll_type);
550 return __ offset();
551 }
552
553
// Register-to-register move. The r31_sp alias is translated to sp on either
// side before emitting the mov, since r31 encodes sp in this context.
554 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
555 if (from_reg == r31_sp)
556 from_reg = sp;
557 if (to_reg == r31_sp)
558 to_reg = sp;
559 __ mov(to_reg, from_reg);
560 }
561
// Not needed on aarch64; deliberately unimplemented.
562 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
563
579 __ mov(dest->as_register(), c->as_jint());
580 break;
581 }
582
583 case T_LONG: {
584 assert(patch_code == lir_patch_none, "no patching handled here");
585 #if INCLUDE_CDS
586 if (AOTCodeCache::is_on_for_dump()) {
587 address b = c->as_pointer();
588 if (AOTRuntimeConstants::contains(b)) {
589 __ load_aotrc_address(dest->as_register_lo(), b);
590 break;
591 }
592 }
593 #endif
594 __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
595 break;
596 }
597
598 case T_OBJECT: {
599 if (patch_code != lir_patch_none) {
600 jobject2reg_with_patching(dest->as_register(), info);
601 } else {
602 jobject2reg(c->as_jobject(), dest->as_register());
603 }
604 break;
605 }
606
607 case T_METADATA: {
608 if (patch_code != lir_patch_none) {
609 klass2reg_with_patching(dest->as_register(), info);
610 } else {
611 __ mov_metadata(dest->as_register(), c->as_metadata());
612 }
613 break;
614 }
615
616 case T_FLOAT: {
617 if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
618 __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
619 } else {
620 __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
621 __ ldrs(dest->as_float_reg(), Address(rscratch1));
622 }
692 LIR_Const* c = src->as_constant_ptr();
693 LIR_Address* to_addr = dest->as_address_ptr();
694
695 void (Assembler::* insn)(Register Rt, const Address &adr);
696
697 switch (type) {
698 case T_ADDRESS:
699 assert(c->as_jint() == 0, "should be");
700 insn = &Assembler::str;
701 break;
702 case T_LONG:
703 assert(c->as_jlong() == 0, "should be");
704 insn = &Assembler::str;
705 break;
706 case T_INT:
707 assert(c->as_jint() == 0, "should be");
708 insn = &Assembler::strw;
709 break;
710 case T_OBJECT:
711 case T_ARRAY:
712 // Non-null case is not handled on aarch64 but handled on x86
713 // FIXME: do we need to add it here?
714 assert(c->as_jobject() == nullptr, "should be");
715 if (UseCompressedOops && !wide) {
716 insn = &Assembler::strw;
717 } else {
718 insn = &Assembler::str;
719 }
720 break;
721 case T_CHAR:
722 case T_SHORT:
723 assert(c->as_jint() == 0, "should be");
724 insn = &Assembler::strh;
725 break;
726 case T_BOOLEAN:
727 case T_BYTE:
728 assert(c->as_jint() == 0, "should be");
729 insn = &Assembler::strb;
730 break;
731 default:
732 ShouldNotReachHere();
733 insn = &Assembler::str; // unreachable
1041 case T_CHAR:
1042 __ ldrh(dest->as_register(), as_Address(from_addr));
1043 break;
1044 case T_SHORT:
1045 __ ldrsh(dest->as_register(), as_Address(from_addr));
1046 break;
1047
1048 default:
1049 ShouldNotReachHere();
1050 }
1051
1052 if (is_reference_type(type)) {
1053 if (UseCompressedOops && !wide) {
1054 __ decode_heap_oop(dest->as_register());
1055 }
1056
1057 __ verify_oop(dest->as_register());
1058 }
1059 }
1060
1061 void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
1062 assert(dst->is_cpu_register(), "must be");
1063 assert(dst->type() == src->type(), "must be");
1064
1065 if (src->is_cpu_register()) {
1066 reg2reg(src, dst);
1067 } else if (src->is_stack()) {
1068 stack2reg(src, dst, dst->type());
1069 } else if (src->is_constant()) {
1070 const2reg(src, dst, lir_patch_none, nullptr);
1071 } else {
1072 ShouldNotReachHere();
1073 }
1074 }
1075
1076 int LIR_Assembler::array_element_size(BasicType type) const {
1077 int elem_size = type2aelembytes(type);
1078 return exact_log2(elem_size);
1079 }
1080
1081
1082 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1083 switch (op->code()) {
1084 case lir_idiv:
1085 case lir_irem:
1086 arithmetic_idiv(op->code(),
1087 op->in_opr1(),
1088 op->in_opr2(),
1089 op->in_opr3(),
1090 op->result_opr(),
1091 op->info());
1092 break;
1093 case lir_fmad:
1094 __ fmaddd(op->result_opr()->as_double_reg(),
1246 __ lea(rscratch1, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
1247 __ ldarb(rscratch1, rscratch1);
1248 __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1249 add_debug_info_for_null_check_here(op->stub()->info());
1250 __ br(Assembler::NE, *op->stub()->entry());
1251 }
1252 __ allocate_object(op->obj()->as_register(),
1253 op->tmp1()->as_register(),
1254 op->tmp2()->as_register(),
1255 op->header_size(),
1256 op->object_size(),
1257 op->klass()->as_register(),
1258 *op->stub()->entry());
1259 __ bind(*op->stub()->continuation());
1260 }
1261
1262 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1263 Register len = op->len()->as_register();
1264 __ uxtw(len, len);
1265
1266 if (UseSlowPath || op->always_slow_path() ||
1267 (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1268 (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1269 __ b(*op->stub()->entry());
1270 } else {
1271 Register tmp1 = op->tmp1()->as_register();
1272 Register tmp2 = op->tmp2()->as_register();
1273 Register tmp3 = op->tmp3()->as_register();
1274 if (len == tmp1) {
1275 tmp1 = tmp3;
1276 } else if (len == tmp2) {
1277 tmp2 = tmp3;
1278 } else if (len == tmp3) {
1279 // everything is ok
1280 } else {
1281 __ mov(tmp3, len);
1282 }
1283 __ allocate_array(op->obj()->as_register(),
1284 len,
1285 tmp1,
1286 tmp2,
1326 assert(data != nullptr, "need data for type check");
1327 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1328 }
1329 Label* success_target = success;
1330 Label* failure_target = failure;
1331
1332 if (obj == k_RInfo) {
1333 k_RInfo = dst;
1334 } else if (obj == klass_RInfo) {
1335 klass_RInfo = dst;
1336 }
1337 if (k->is_loaded() && !UseCompressedClassPointers) {
1338 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1339 } else {
1340 Rtmp1 = op->tmp3()->as_register();
1341 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1342 }
1343
1344 assert_different_registers(obj, k_RInfo, klass_RInfo);
1345
1346 if (op->need_null_check()) {
1347 if (should_profile) {
1348 Register mdo = klass_RInfo;
1349 __ mov_metadata(mdo, md->constant_encoding());
1350 Label not_null;
1351 __ cbnz(obj, not_null);
1352 // Object is null; update MDO and exit
1353 Address data_addr
1354 = __ form_address(rscratch2, mdo,
1355 md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1356 0);
1357 __ ldrb(rscratch1, data_addr);
1358 __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1359 __ strb(rscratch1, data_addr);
1360 __ b(*obj_is_null);
1361 __ bind(not_null);
1362
1363 Register recv = k_RInfo;
1364 __ load_klass(recv, obj);
1365 type_profile_helper(mdo, md, data, recv);
1366 } else {
1367 __ cbz(obj, *obj_is_null);
1368 }
1369 }
1370
1371 if (!k->is_loaded()) {
1372 klass2reg_with_patching(k_RInfo, op->info_for_patch());
1373 } else {
1374 __ mov_metadata(k_RInfo, k->constant_encoding());
1375 }
1376 __ verify_oop(obj);
1377
1378 if (op->fast_check()) {
1379 assert(!k->is_loaded() || !k->is_obj_array_klass(), "Use refined array for a direct pointer comparison");
1380 // get object class
1381 // not a safepoint as obj null check happens earlier
1382 __ load_klass(rscratch1, obj);
1383 __ cmp( rscratch1, k_RInfo);
1384
1385 __ br(Assembler::NE, *failure_target);
1386 // successful cast, fall through to profile or jump
1387 } else {
1388 // get object class
1389 // not a safepoint as obj null check happens earlier
1390 __ load_klass(klass_RInfo, obj);
1391 if (k->is_loaded()) {
1392 // See if we get an immediate positive hit
1393 __ ldr(rscratch1, Address(klass_RInfo, int64_t(k->super_check_offset())));
1394 __ cmp(k_RInfo, rscratch1);
1395 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1396 __ br(Assembler::NE, *failure_target);
1397 // successful cast, fall through to profile or jump
1398 } else {
1399 // See if we get an immediate positive hit
1400 __ br(Assembler::EQ, *success_target);
1401 // check for self
1402 if (k->is_loaded() && k->is_obj_array_klass()) {
1403 // For a direct pointer comparison, we need the refined array klass pointer
1404 ciKlass* k_refined = ciObjArrayKlass::make(k->as_obj_array_klass()->element_klass());
1405 __ mov_metadata(rscratch1, k_refined->constant_encoding());
1406 __ cmp(klass_RInfo, rscratch1);
1407 } else {
1408 __ cmp(klass_RInfo, k_RInfo);
1409 }
1410 __ br(Assembler::EQ, *success_target);
1411
1412 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1413 __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1414 __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1415 // result is a boolean
1416 __ cbzw(klass_RInfo, *failure_target);
1417 // successful cast, fall through to profile or jump
1418 }
1419 } else {
1420 // perform the fast part of the checking logic
1421 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
1422 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1423 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1424 __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1425 __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1426 // result is a boolean
1427 __ cbz(k_RInfo, *failure_target);
1428 // successful cast, fall through to profile or jump
1429 }
1510 __ bind(success);
1511 if (dst != obj) {
1512 __ mov(dst, obj);
1513 }
1514 } else if (code == lir_instanceof) {
1515 Register obj = op->object()->as_register();
1516 Register dst = op->result_opr()->as_register();
1517 Label success, failure, done;
1518 emit_typecheck_helper(op, &success, &failure, &failure);
1519 __ bind(failure);
1520 __ mov(dst, zr);
1521 __ b(done);
1522 __ bind(success);
1523 __ mov(dst, 1);
1524 __ bind(done);
1525 } else {
1526 ShouldNotReachHere();
1527 }
1528 }
1529
// Runtime check used at element load/store sites whose static array type is
// imprecise. Branches to the stub (slow path) when the check fails.
void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
  // We are loading/storing from/to an array that *may* be a flat array (the
  // declared type is Object[], abstract[], interface[] or VT.ref[]).
  // If this array is a flat array, take the slow path.
  __ test_flat_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
  if (!op->value()->is_illegal()) {
    // The array is not a flat array, but it might be null-free. If we are storing
    // a null into a null-free array, take the slow path (which will throw NPE).
    Label skip;
    // A non-null stored value cannot trigger the NPE, so skip the null-free test.
    __ cbnz(op->value()->as_register(), skip);
    __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
    __ bind(skip);
  }
}
1544
// Sets the condition flags according to the array's null-free property bit;
// the subsequent LIR branch (emitted by the caller) acts on the result.
void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
  // We are storing into an array that *may* be null-free (the declared type is
  // Object[], abstract[], interface[] or VT.ref[]).
  Label test_mark_word;
  Register tmp = op->tmp()->as_register();
  // If the mark word is in the unlocked state it carries the array property
  // bits directly; otherwise fetch the prototype header from the klass.
  __ ldr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
  __ tst(tmp, markWord::unlocked_value);
  __ br(Assembler::NE, test_mark_word);
  __ load_prototype_header(tmp, op->array()->as_register());
  __ bind(test_mark_word);
  // Leave the null-free bit test result in the flags for the caller.
  __ tst(tmp, markWord::null_free_array_bit_in_place);
}
1557
// Implements the value-object substitutability test (acmp on inline types):
// stores equal_result/not_equal_result into op->result_opr() depending on
// whether left and right are substitutable. Cheap checks are inlined; the
// full field-by-field comparison is delegated to op->stub().
void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
  Label L_oops_equal;
  Label L_oops_not_equal;
  Label L_end;

  Register left = op->left()->as_register();
  Register right = op->right()->as_register();

  // Identical references are trivially substitutable.
  __ cmp(left, right);
  __ br(Assembler::EQ, L_oops_equal);

  // (1) Null check -- if one of the operands is null, the other must not be null (because
  // the two references are not equal), so they are not substitutable,
  // FIXME: do null check only if the operand is nullable
  {
    __ cbz(left, L_oops_not_equal);
    __ cbz(right, L_oops_not_equal);
  }

  ciKlass* left_klass = op->left_klass();
  ciKlass* right_klass = op->right_klass();

  // (2) Inline type check -- if either of the operands is not a inline type,
  // they are not substitutable. We do this only if we are not sure that the
  // operands are inline type
  if ((left_klass == nullptr || right_klass == nullptr) ||// The klass is still unloaded, or came from a Phi node.
      !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
    Register tmp1 = op->tmp1()->as_register();
    // AND both mark words with the inline-type pattern: the result still
    // matches the pattern only if both operands carry it.
    __ mov(tmp1, markWord::inline_type_pattern);
    __ ldr(rscratch1, Address(left, oopDesc::mark_offset_in_bytes()));
    __ andr(tmp1, tmp1, rscratch1);
    __ ldr(rscratch1, Address(right, oopDesc::mark_offset_in_bytes()));
    __ andr(tmp1, tmp1, rscratch1);
    __ cmp(tmp1, (u1)markWord::inline_type_pattern);
    __ br(Assembler::NE, L_oops_not_equal);
  }

  // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
  if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) {
    // No need to load klass -- the operands are statically known to be the same inline klass.
    __ b(*op->stub()->entry());
  } else {
    Register left_klass_op = op->left_klass_op()->as_register();
    Register right_klass_op = op->right_klass_op()->as_register();

    if (UseCompressedClassPointers) {
      // Narrow klass words can be compared directly, no decoding needed.
      __ ldrw(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes()));
      __ ldrw(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
      __ cmpw(left_klass_op, right_klass_op);
    } else {
      __ ldr(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes()));
      __ ldr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
      __ cmp(left_klass_op, right_klass_op);
    }

    __ br(Assembler::EQ, *op->stub()->entry()); // same klass -> do slow check
    // fall through to L_oops_not_equal
  }

  __ bind(L_oops_not_equal);
  move(op->not_equal_result(), op->result_opr());
  __ b(L_end);

  __ bind(L_oops_equal);
  move(op->equal_result(), op->result_opr());
  __ b(L_end);

  // We've returned from the stub. R0 contains 0x0 IFF the two
  // operands are not substitutable. (Don't compare against 0x1 in case the
  // C compiler is naughty)
  __ bind(*op->stub()->continuation());
  __ cbz(r0, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
  move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
  // fall-through
  __ bind(L_end);
}
1634
1635
// 32-bit compare-and-swap: if *addr == cmpval then *addr = newval.
// Afterwards rscratch1 is 0 when the flags show EQ (swap performed) and 1
// otherwise.
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  // Full fence so the CAS has sequentially consistent semantics.
  __ membar(__ AnyAny);
}
1641
// 64-bit compare-and-swap: if *addr == cmpval then *addr = newval.
// Afterwards rscratch1 is 0 when the flags show EQ (swap performed) and 1
// otherwise.
void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  // Full fence so the CAS has sequentially consistent semantics.
  __ membar(__ AnyAny);
}
1647
1648
1649 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1650 Register addr;
1651 if (op->addr()->is_register()) {
1652 addr = as_reg(op->addr());
1653 } else {
1654 assert(op->addr()->is_address(), "what else?");
1655 LIR_Address* addr_ptr = op->addr()->as_address_ptr();
2129 __ cmp(left->as_register_lo(), right->as_register_lo());
2130 __ mov(dst->as_register(), (uint64_t)-1L);
2131 __ br(Assembler::LT, done);
2132 __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
2133 __ bind(done);
2134 } else {
2135 ShouldNotReachHere();
2136 }
2137 }
2138
2139
2140 void LIR_Assembler::align_call(LIR_Code code) { }
2141
2142
// Emit a direct Java call through a trampoline (so the target is reachable
// from anywhere in the code cache). Bails out of the compilation when no
// trampoline stub could be allocated.
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  address call = __ trampoline_call(Address(op->addr(), rtype));
  if (call == nullptr) {
    bailout("trampoline stub overflow");
    return;
  }
  // Record debug/oop-map info for the call site.
  add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
  __ post_call_nop();
}
2152
2153
// Emit a virtual/interface Java call through an inline cache. Bails out of
// the compilation when the required stub could not be allocated.
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  address call = __ ic_call(op->addr());
  if (call == nullptr) {
    bailout("trampoline stub overflow");
    return;
  }
  // Record debug/oop-map info for the call site.
  add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
  __ post_call_nop();
}
2163
// Emit the out-of-line stub for the static call just emitted at call_pc.
// The stub is what call resolution later patches; it is linked to its call
// site through a static_stub_Relocation.
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == nullptr) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  // Associate the stub with its call site for later patching.
  __ relocate(static_stub_Relocation::spec(call_pc));
  __ emit_static_call_stub();

  // The budget must also cover the trampoline the stub may need.
  assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
         <= call_stub_size(), "stub too big");
  __ end_a_stub();
}
2303
2304
// Store the integer constant c into the outgoing-argument area of the
// current frame, offset_from_rsp_in_words words above sp (used to pass
// arguments to runtime stubs).
void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ mov (rscratch1, c);
  __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
}
2312
2313
// Store an oop constant into the outgoing-argument area. Appears unused on
// aarch64 — the ShouldNotReachHere() guard fires if it is ever called; the
// code below it documents the intended behavior.
void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
  ShouldNotReachHere();
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ lea(rscratch1, __ constant_oop_address(o));
  __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
}
2322
2323 void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) {
2324 if (null_check) {
2325 __ cbz(obj, *slow_path->entry());
2326 }
2327 if (is_dest) {
2328 __ test_null_free_array_oop(obj, tmp, *slow_path->entry());
2329 // TODO 8350865 Flat no longer implies null-free, so we need to check for flat dest. Can we do better here?
2330 __ test_flat_array_oop(obj, tmp, *slow_path->entry());
2331 } else {
2332 __ test_flat_array_oop(obj, tmp, *slow_path->entry());
2333 }
2334 }
2335
2336 // This code replaces a call to arraycopy; no exception may
2337 // be thrown in this code, they must be thrown in the System.arraycopy
2338 // activation frame; we could save some checks if this would not be the case
2339 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2340 ciArrayKlass* default_type = op->expected_type();
2341 Register src = op->src()->as_register();
2342 Register dst = op->dst()->as_register();
2343 Register src_pos = op->src_pos()->as_register();
2344 Register dst_pos = op->dst_pos()->as_register();
2345 Register length = op->length()->as_register();
2346 Register tmp = op->tmp()->as_register();
2347
2348 CodeStub* stub = op->stub();
2349 int flags = op->flags();
2350 BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
2351 if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2352
2353 if (flags & LIR_OpArrayCopy::always_slow_path) {
2354 __ b(*stub->entry());
2355 __ bind(*stub->continuation());
2356 return;
2357 }
2358
2359 // if we don't know anything, just go through the generic arraycopy
2360 if (default_type == nullptr // || basic_type == T_OBJECT
2361 ) {
2362 Label done;
2363 assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2364
2365 // Save the arguments in case the generic arraycopy fails and we
2366 // have to fall back to the JNI stub
2367 __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2368 __ stp(length, src_pos, Address(sp, 2*BytesPerWord));
2369 __ str(src, Address(sp, 4*BytesPerWord));
2370
2371 address copyfunc_addr = StubRoutines::generic_arraycopy();
2372 assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2373
2374 // The arguments are in java calling convention so we shift them
2375 // to C convention
2376 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2377 __ mov(c_rarg0, j_rarg0);
2378 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2392 __ cbz(r0, *stub->continuation());
2393
2394 // Reload values from the stack so they are where the stub
2395 // expects them.
2396 __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2397 __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
2398 __ ldr(src, Address(sp, 4*BytesPerWord));
2399
2400 // r0 is -1^K where K == partial copied count
2401 __ eonw(rscratch1, r0, zr);
2402 // adjust length down and src/end pos up by partial copied count
2403 __ subw(length, length, rscratch1);
2404 __ addw(src_pos, src_pos, rscratch1);
2405 __ addw(dst_pos, dst_pos, rscratch1);
2406 __ b(*stub->entry());
2407
2408 __ bind(*stub->continuation());
2409 return;
2410 }
2411
2412 // Handle inline type arrays
2413 if (flags & LIR_OpArrayCopy::src_inlinetype_check) {
2414 arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
2415 }
2416 if (flags & LIR_OpArrayCopy::dst_inlinetype_check) {
2417 arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
2418 }
2419
2420 assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2421
2422 int elem_size = type2aelembytes(basic_type);
2423 int scale = exact_log2(elem_size);
2424
2425 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2426 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2427
2428 // test for null
2429 if (flags & LIR_OpArrayCopy::src_null_check) {
2430 __ cbz(src, *stub->entry());
2431 }
2432 if (flags & LIR_OpArrayCopy::dst_null_check) {
2433 __ cbz(dst, *stub->entry());
2434 }
2435
2436 // If the compiler was not able to prove that exact type of the source or the destination
2437 // of the arraycopy is an array type, check at runtime if the source or the destination is
2438 // an instance type.
2439 if (flags & LIR_OpArrayCopy::type_check) {
2914 __ verify_klass_ptr(tmp);
2915 #endif
2916 } else {
2917 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2918 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2919
2920 __ ldr(tmp, mdo_addr);
2921 __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2922
2923 __ orr(tmp, tmp, TypeEntries::type_unknown);
2924 __ str(tmp, mdo_addr);
2925 // FIXME: Write barrier needed here?
2926 }
2927 }
2928
2929 __ bind(next);
2930 }
2931 COMMENT("} emit_profile_type");
2932 }
2933
// Profile whether obj is an inline-type instance: when it is, OR op->flag()
// into the MDO byte at op->mdp(). Nulls and non-inline-type objects leave
// the MDO untouched.
void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  bool not_null = op->not_null();
  int flag = op->flag();

  Label not_inline_type;
  if (!not_null) {
    // obj may be null; null is not an inline type, so skip the update.
    __ cbz(obj, not_inline_type);
  }

  __ test_oop_is_not_inline_type(obj, tmp, not_inline_type);

  // Read-modify-write the profile flag byte.
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr(), rscratch2);
  __ ldrb(rscratch1, mdo_addr);
  __ orr(rscratch1, rscratch1, flag);
  __ strb(rscratch1, mdo_addr);

  __ bind(not_inline_type);
}
2954
void LIR_Assembler::align_backward_branch_target() {
  // Intentionally empty: no extra alignment is applied to backward branch
  // targets on aarch64.
}
2957
2958
2959 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2960 // tmp must be unused
2961 assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2962
2963 if (left->is_single_cpu()) {
2964 assert(dest->is_single_cpu(), "expect single result reg");
2965 __ negw(dest->as_register(), left->as_register());
2966 } else if (left->is_double_cpu()) {
2967 assert(dest->is_double_cpu(), "expect double result reg");
2968 __ neg(dest->as_register_lo(), left->as_register_lo());
2969 } else if (left->is_single_fpu()) {
2970 assert(dest->is_single_fpu(), "expect single float result reg");
2971 __ fnegs(dest->as_float_reg(), left->as_float_reg());
2972 } else {
2973 assert(left->is_double_fpu(), "expect double float operand reg");
3073 void LIR_Assembler::membar_loadload() {
3074 __ membar(Assembler::LoadLoad);
3075 }
3076
3077 void LIR_Assembler::membar_storestore() {
3078 __ membar(MacroAssembler::StoreStore);
3079 }
3080
3081 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
3082
3083 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
3084
// Emit the platform spin-wait hint (backs Thread.onSpinWait()).
void LIR_Assembler::on_spin_wait() {
  __ spin_wait();
}
3088
// Copy the current thread pointer (kept in rthread on aarch64) into the
// result register.
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  __ mov(result_reg->as_register(), rthread);
}
3092
// Compare the frame's saved original-PC slot against NULL_WORD; leaves the
// result in the condition flags for the caller to branch on.
void LIR_Assembler::check_orig_pc() {
  __ ldr(rscratch2, frame_map()->address_for_orig_pc_addr());
  __ cmp(rscratch2, (u1)NULL_WORD);
}
3097
3098 void LIR_Assembler::peephole(LIR_List *lir) {
3099 #if 0
3100 if (tableswitch_count >= max_tableswitches)
3101 return;
3102
3103 /*
3104 This finite-state automaton recognizes sequences of compare-and-
3105 branch instructions. We will turn them into a tableswitch. You
3106 could argue that C1 really shouldn't be doing this sort of
3107 optimization, but without it the code is really horrible.
3108 */
3109
3110 enum { start_s, cmp1_s, beq_s, cmp_s } state;
3111 int first_key, last_key = -2147483648;
3112 int next_key = 0;
3113 int start_insn = -1;
3114 int last_insn = -1;
3115 Register reg = noreg;
3116 LIR_Opr reg_opr;
|