15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "asm/macroAssembler.inline.hpp"
27 #include "asm/assembler.hpp"
28 #include "c1/c1_CodeStubs.hpp"
29 #include "c1/c1_Compilation.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_MacroAssembler.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "ci/ciArrayKlass.hpp"
35 #include "ci/ciInstance.hpp"
36 #include "code/aotCodeCache.hpp"
37 #include "code/compiledIC.hpp"
38 #include "gc/shared/collectedHeap.hpp"
39 #include "gc/shared/gc_globals.hpp"
40 #include "nativeInst_aarch64.hpp"
41 #include "oops/objArrayKlass.hpp"
42 #include "runtime/frame.inline.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "runtime/stubRoutines.hpp"
45 #include "utilities/powerOfTwo.hpp"
46 #include "vmreg_aarch64.inline.hpp"
47
48
49 #ifndef PRODUCT
50 #define COMMENT(x) do { __ block_comment(x); } while (0)
51 #else
52 #define COMMENT(x)
53 #endif
54
55 NEEDS_CLEANUP // remove these definitions?
56 const Register SYNC_header = r0; // synchronization header
57 const Register SHIFT_count = r0; // where count for shift operations must be
58
59 #define __ _masm->
60
61
394 MonitorExitStub* stub = nullptr;
395 if (method()->is_synchronized()) {
396 monitor_address(0, FrameMap::r0_opr);
397 stub = new MonitorExitStub(FrameMap::r0_opr, 0);
398 __ unlock_object(r5, r4, r0, r6, *stub->entry());
399 __ bind(*stub->continuation());
400 }
401
402 if (compilation()->env()->dtrace_method_probes()) {
403 __ mov(c_rarg0, rthread);
404 __ mov_metadata(c_rarg1, method()->constant_encoding());
405 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
406 }
407
408 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
409 __ mov(r0, r19); // Restore the exception
410 }
411
412 // remove the activation and dispatch to the unwind handler
413 __ block_comment("remove_frame and dispatch to the unwind handler");
414 __ remove_frame(initial_frame_size_in_bytes());
415 __ far_jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_unwind_exception_id)));
416
417 // Emit the slow path assembly
418 if (stub != nullptr) {
419 stub->emit_code(this);
420 }
421
422 return offset;
423 }
424
425
426 int LIR_Assembler::emit_deopt_handler() {
427 // generate code for exception handler
428 address handler_base = __ start_a_stub(deopt_handler_size());
429 if (handler_base == nullptr) {
430 // not enough space left for the handler
431 bailout("deopt handler overflow");
432 return -1;
433 }
434
446 assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
447 "out of bounds read in post-call NOP check");
448 __ end_a_stub();
449
450 return entry_offset;
451 }
452
// Attach debug information (which also carries the oop map) to the
// instruction at adr: mark the site with a poll relocation, flush any
// pending debug info at the current pc offset, record the scope data,
// and register the exception handlers covering this pco, if any.
void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
  _masm->code_section()->relocate(adr, relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != nullptr) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}
462
// Emit the method epilogue: tear down the frame, perform the return-time
// safepoint poll, and return. Word-sized results are expected in r0.
void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // Record where the poll instruction lands so the slow-path stub can
  // associate itself with this return site.
  code_stub->set_safepoint_offset(__ offset());
  __ relocate(relocInfo::poll_return_type);
  __ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
  __ ret(lr);
}
478
// Emit an in-method safepoint poll (load from the polling page) and
// return the code offset just past it. info supplies the oop map
// recorded for the poll site.
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  guarantee(info != nullptr, "Shouldn't be null");
  __ get_polling_page(rscratch1, relocInfo::poll_type);
  add_debug_info_for_branch(info); // This isn't just debug info:
                                   // it's the oop map
  __ read_polling_page(rscratch1, relocInfo::poll_type);
  return __ offset();
}
487
488
489 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
490 if (from_reg == r31_sp)
491 from_reg = sp;
492 if (to_reg == r31_sp)
493 to_reg = sp;
494 __ mov(to_reg, from_reg);
495 }
496
// Register swap is not needed by this port. // NOTE(review): intentionally unimplemented
void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
498
514 __ mov(dest->as_register(), c->as_jint());
515 break;
516 }
517
518 case T_LONG: {
519 assert(patch_code == lir_patch_none, "no patching handled here");
520 #if INCLUDE_CDS
521 if (AOTCodeCache::is_on_for_dump()) {
522 address b = c->as_pointer();
523 if (AOTRuntimeConstants::contains(b)) {
524 __ load_aotrc_address(dest->as_register_lo(), b);
525 break;
526 }
527 }
528 #endif
529 __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
530 break;
531 }
532
533 case T_OBJECT: {
534 if (patch_code == lir_patch_none) {
535 jobject2reg(c->as_jobject(), dest->as_register());
536 } else {
537 jobject2reg_with_patching(dest->as_register(), info);
538 }
539 break;
540 }
541
542 case T_METADATA: {
543 if (patch_code != lir_patch_none) {
544 klass2reg_with_patching(dest->as_register(), info);
545 } else {
546 __ mov_metadata(dest->as_register(), c->as_metadata());
547 }
548 break;
549 }
550
551 case T_FLOAT: {
552 if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
553 __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
554 } else {
555 __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
556 __ ldrs(dest->as_float_reg(), Address(rscratch1));
557 }
627 LIR_Const* c = src->as_constant_ptr();
628 LIR_Address* to_addr = dest->as_address_ptr();
629
630 void (Assembler::* insn)(Register Rt, const Address &adr);
631
632 switch (type) {
633 case T_ADDRESS:
634 assert(c->as_jint() == 0, "should be");
635 insn = &Assembler::str;
636 break;
637 case T_LONG:
638 assert(c->as_jlong() == 0, "should be");
639 insn = &Assembler::str;
640 break;
641 case T_INT:
642 assert(c->as_jint() == 0, "should be");
643 insn = &Assembler::strw;
644 break;
645 case T_OBJECT:
646 case T_ARRAY:
647 assert(c->as_jobject() == nullptr, "should be");
648 if (UseCompressedOops && !wide) {
649 insn = &Assembler::strw;
650 } else {
651 insn = &Assembler::str;
652 }
653 break;
654 case T_CHAR:
655 case T_SHORT:
656 assert(c->as_jint() == 0, "should be");
657 insn = &Assembler::strh;
658 break;
659 case T_BOOLEAN:
660 case T_BYTE:
661 assert(c->as_jint() == 0, "should be");
662 insn = &Assembler::strb;
663 break;
664 default:
665 ShouldNotReachHere();
666 insn = &Assembler::str; // unreachable
974 case T_CHAR:
975 __ ldrh(dest->as_register(), as_Address(from_addr));
976 break;
977 case T_SHORT:
978 __ ldrsh(dest->as_register(), as_Address(from_addr));
979 break;
980
981 default:
982 ShouldNotReachHere();
983 }
984
985 if (is_reference_type(type)) {
986 if (UseCompressedOops && !wide) {
987 __ decode_heap_oop(dest->as_register());
988 }
989
990 __ verify_oop(dest->as_register());
991 }
992 }
993
994
995 int LIR_Assembler::array_element_size(BasicType type) const {
996 int elem_size = type2aelembytes(type);
997 return exact_log2(elem_size);
998 }
999
1000
1001 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1002 switch (op->code()) {
1003 case lir_idiv:
1004 case lir_irem:
1005 arithmetic_idiv(op->code(),
1006 op->in_opr1(),
1007 op->in_opr2(),
1008 op->in_opr3(),
1009 op->result_opr(),
1010 op->info());
1011 break;
1012 case lir_fmad:
1013 __ fmaddd(op->result_opr()->as_double_reg(),
1165 __ lea(rscratch1, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
1166 __ ldarb(rscratch1, rscratch1);
1167 __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1168 add_debug_info_for_null_check_here(op->stub()->info());
1169 __ br(Assembler::NE, *op->stub()->entry());
1170 }
1171 __ allocate_object(op->obj()->as_register(),
1172 op->tmp1()->as_register(),
1173 op->tmp2()->as_register(),
1174 op->header_size(),
1175 op->object_size(),
1176 op->klass()->as_register(),
1177 *op->stub()->entry());
1178 __ bind(*op->stub()->continuation());
1179 }
1180
1181 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1182 Register len = op->len()->as_register();
1183 __ uxtw(len, len);
1184
1185 if (UseSlowPath ||
1186 (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1187 (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1188 __ b(*op->stub()->entry());
1189 } else {
1190 Register tmp1 = op->tmp1()->as_register();
1191 Register tmp2 = op->tmp2()->as_register();
1192 Register tmp3 = op->tmp3()->as_register();
1193 if (len == tmp1) {
1194 tmp1 = tmp3;
1195 } else if (len == tmp2) {
1196 tmp2 = tmp3;
1197 } else if (len == tmp3) {
1198 // everything is ok
1199 } else {
1200 __ mov(tmp3, len);
1201 }
1202 __ allocate_array(op->obj()->as_register(),
1203 len,
1204 tmp1,
1205 tmp2,
1242 md = method->method_data_or_null();
1243 assert(md != nullptr, "Sanity");
1244 data = md->bci_to_data(bci);
1245 assert(data != nullptr, "need data for type check");
1246 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1247 }
1248 Label* success_target = success;
1249 Label* failure_target = failure;
1250
1251 if (obj == k_RInfo) {
1252 k_RInfo = dst;
1253 } else if (obj == klass_RInfo) {
1254 klass_RInfo = dst;
1255 }
1256
1257 Rtmp1 = op->tmp3()->as_register();
1258 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1259
1260 assert_different_registers(obj, k_RInfo, klass_RInfo);
1261
1262 if (should_profile) {
1263 Register mdo = klass_RInfo;
1264 __ mov_metadata(mdo, md->constant_encoding());
1265 Label not_null;
1266 __ cbnz(obj, not_null);
1267 // Object is null; update MDO and exit
1268 Address data_addr
1269 = __ form_address(rscratch2, mdo,
1270 md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1271 0);
1272 __ ldrb(rscratch1, data_addr);
1273 __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1274 __ strb(rscratch1, data_addr);
1275 __ b(*obj_is_null);
1276 __ bind(not_null);
1277
1278 Register recv = k_RInfo;
1279 __ load_klass(recv, obj);
1280 type_profile_helper(mdo, md, data, recv);
1281 } else {
1282 __ cbz(obj, *obj_is_null);
1283 }
1284
1285 if (!k->is_loaded()) {
1286 klass2reg_with_patching(k_RInfo, op->info_for_patch());
1287 } else {
1288 __ mov_metadata(k_RInfo, k->constant_encoding());
1289 }
1290 __ verify_oop(obj);
1291
1292 if (op->fast_check()) {
1293 // get object class
1294 // not a safepoint as obj null check happens earlier
1295 __ load_klass(rscratch1, obj);
1296 __ cmp( rscratch1, k_RInfo);
1297
1298 __ br(Assembler::NE, *failure_target);
1299 // successful cast, fall through to profile or jump
1300 } else {
1301 // get object class
1302 // not a safepoint as obj null check happens earlier
1303 __ load_klass(klass_RInfo, obj);
1304 if (k->is_loaded()) {
1305 // See if we get an immediate positive hit
1306 __ ldr(rscratch1, Address(klass_RInfo, int64_t(k->super_check_offset())));
1307 __ cmp(k_RInfo, rscratch1);
1308 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1309 __ br(Assembler::NE, *failure_target);
1310 // successful cast, fall through to profile or jump
1311 } else {
1312 // See if we get an immediate positive hit
1313 __ br(Assembler::EQ, *success_target);
1314 // check for self
1315 __ cmp(klass_RInfo, k_RInfo);
1316 __ br(Assembler::EQ, *success_target);
1317
1318 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1319 __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1320 __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1321 // result is a boolean
1322 __ cbzw(klass_RInfo, *failure_target);
1323 // successful cast, fall through to profile or jump
1324 }
1325 } else {
1326 // perform the fast part of the checking logic
1327 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
1328 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1329 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1330 __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1331 __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1332 // result is a boolean
1333 __ cbz(k_RInfo, *failure_target);
1334 // successful cast, fall through to profile or jump
1335 }
1416 __ bind(success);
1417 if (dst != obj) {
1418 __ mov(dst, obj);
1419 }
1420 } else if (code == lir_instanceof) {
1421 Register obj = op->object()->as_register();
1422 Register dst = op->result_opr()->as_register();
1423 Label success, failure, done;
1424 emit_typecheck_helper(op, &success, &failure, &failure);
1425 __ bind(failure);
1426 __ mov(dst, zr);
1427 __ b(done);
1428 __ bind(success);
1429 __ mov(dst, 1);
1430 __ bind(done);
1431 } else {
1432 ShouldNotReachHere();
1433 }
1434 }
1435
// 32-bit compare-and-swap at [addr]: store newval if *addr == cmpval.
// The NE (failure) condition is materialized into rscratch1, then a
// full two-way barrier is emitted.
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}
1441
// 64-bit compare-and-swap at [addr]: store newval if *addr == cmpval.
// The NE (failure) condition is materialized into rscratch1, then a
// full two-way barrier is emitted.
void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}
1447
1448
1449 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1450 Register addr;
1451 if (op->addr()->is_register()) {
1452 addr = as_reg(op->addr());
1453 } else {
1454 assert(op->addr()->is_address(), "what else?");
1455 LIR_Address* addr_ptr = op->addr()->as_address_ptr();
1929 __ cmp(left->as_register_lo(), right->as_register_lo());
1930 __ mov(dst->as_register(), (uint64_t)-1L);
1931 __ br(Assembler::LT, done);
1932 __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
1933 __ bind(done);
1934 } else {
1935 ShouldNotReachHere();
1936 }
1937 }
1938
1939
// No call-site alignment is required on this port.
void LIR_Assembler::align_call(LIR_Code code) { }
1941
1942
// Emit a direct Java call through a trampoline; bails out the
// compilation if no trampoline stub space is left. Records call info
// and appends the post-call nop used for deoptimization support.
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  address call = __ trampoline_call(Address(op->addr(), rtype));
  if (call == nullptr) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info());
  __ post_call_nop();
}
1952
1953
// Emit an inline-cache dispatched Java call; bails out the compilation
// if no trampoline stub space is left. Records call info and appends
// the post-call nop.
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  address call = __ ic_call(op->addr());
  if (call == nullptr) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info());
  __ post_call_nop();
}
1963
// Emit the out-of-line stub associated with the static call at the
// current pc; the stub is later patched to dispatch to the resolved
// callee. Bails out if no stub space is left.
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == nullptr) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  __ relocate(static_stub_Relocation::spec(call_pc));
  __ emit_static_call_stub();

  // call_stub_size() must also cover the trampoline that may be emitted
  // alongside this stub.
  assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
         <= call_stub_size(), "stub too big");
  __ end_a_stub();
}
2103
2104
// Store the integer constant c into the reserved outgoing-argument area
// at the given word offset from sp. Clobbers rscratch1.
void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ mov (rscratch1, c);
  __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
}
2112
2113
// Store the oop constant o into the reserved outgoing-argument area.
// Guarded by ShouldNotReachHere(): this overload is not expected to be
// used on this port — presumably kept for parity with other platforms
// (TODO confirm). Clobbers rscratch1.
void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
  ShouldNotReachHere();
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ lea(rscratch1, __ constant_oop_address(o));
  __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
}
2122
2123
2124 // This code replaces a call to arraycopy; no exception may
2125 // be thrown in this code, they must be thrown in the System.arraycopy
2126 // activation frame; we could save some checks if this would not be the case
2127 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2128 ciArrayKlass* default_type = op->expected_type();
2129 Register src = op->src()->as_register();
2130 Register dst = op->dst()->as_register();
2131 Register src_pos = op->src_pos()->as_register();
2132 Register dst_pos = op->dst_pos()->as_register();
2133 Register length = op->length()->as_register();
2134 Register tmp = op->tmp()->as_register();
2135
2136 CodeStub* stub = op->stub();
2137 int flags = op->flags();
2138 BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
2139 if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2140
2141 // if we don't know anything, just go through the generic arraycopy
2142 if (default_type == nullptr // || basic_type == T_OBJECT
2143 ) {
2144 Label done;
2145 assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2146
2147 // Save the arguments in case the generic arraycopy fails and we
2148 // have to fall back to the JNI stub
2149 __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2150 __ stp(length, src_pos, Address(sp, 2*BytesPerWord));
2151 __ str(src, Address(sp, 4*BytesPerWord));
2152
2153 address copyfunc_addr = StubRoutines::generic_arraycopy();
2154 assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2155
2156 // The arguments are in java calling convention so we shift them
2157 // to C convention
2158 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2159 __ mov(c_rarg0, j_rarg0);
2160 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2174 __ cbz(r0, *stub->continuation());
2175
2176 // Reload values from the stack so they are where the stub
2177 // expects them.
2178 __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2179 __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
2180 __ ldr(src, Address(sp, 4*BytesPerWord));
2181
2182 // r0 is -1^K where K == partial copied count
2183 __ eonw(rscratch1, r0, zr);
2184 // adjust length down and src/end pos up by partial copied count
2185 __ subw(length, length, rscratch1);
2186 __ addw(src_pos, src_pos, rscratch1);
2187 __ addw(dst_pos, dst_pos, rscratch1);
2188 __ b(*stub->entry());
2189
2190 __ bind(*stub->continuation());
2191 return;
2192 }
2193
2194 assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2195
2196 int elem_size = type2aelembytes(basic_type);
2197 int scale = exact_log2(elem_size);
2198
2199 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2200 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2201
2202 // test for null
2203 if (flags & LIR_OpArrayCopy::src_null_check) {
2204 __ cbz(src, *stub->entry());
2205 }
2206 if (flags & LIR_OpArrayCopy::dst_null_check) {
2207 __ cbz(dst, *stub->entry());
2208 }
2209
2210 // If the compiler was not able to prove that exact type of the source or the destination
2211 // of the arraycopy is an array type, check at runtime if the source or the destination is
2212 // an instance type.
2213 if (flags & LIR_OpArrayCopy::type_check) {
2688 __ verify_klass_ptr(tmp);
2689 #endif
2690 } else {
2691 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2692 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2693
2694 __ ldr(tmp, mdo_addr);
2695 __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2696
2697 __ orr(tmp, tmp, TypeEntries::type_unknown);
2698 __ str(tmp, mdo_addr);
2699 // FIXME: Write barrier needed here?
2700 }
2701 }
2702
2703 __ bind(next);
2704 }
2705 COMMENT("} emit_profile_type");
2706 }
2707
2708
// No alignment of backward branch targets is performed on this port.
void LIR_Assembler::align_backward_branch_target() {
}
2711
2712
2713 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2714 // tmp must be unused
2715 assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2716
2717 if (left->is_single_cpu()) {
2718 assert(dest->is_single_cpu(), "expect single result reg");
2719 __ negw(dest->as_register(), left->as_register());
2720 } else if (left->is_double_cpu()) {
2721 assert(dest->is_double_cpu(), "expect double result reg");
2722 __ neg(dest->as_register_lo(), left->as_register_lo());
2723 } else if (left->is_single_fpu()) {
2724 assert(dest->is_single_fpu(), "expect single float result reg");
2725 __ fnegs(dest->as_float_reg(), left->as_float_reg());
2726 } else {
2727 assert(left->is_double_fpu(), "expect double float operand reg");
// Emit a load-load ordering barrier.
void LIR_Assembler::membar_loadload() {
  __ membar(Assembler::LoadLoad);
}
2830
// Emit a store-store ordering barrier.
void LIR_Assembler::membar_storestore() {
  __ membar(MacroAssembler::StoreStore);
}
2834
// Emit a load-store ordering barrier.
void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
2836
// Emit a store-load ordering barrier.
void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
2838
// Emit the platform spin-wait hint sequence for Thread.onSpinWait().
void LIR_Assembler::on_spin_wait() {
  __ spin_wait();
}
2842
// Copy the current-thread register (rthread) into the result register.
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  __ mov(result_reg->as_register(), rthread);
}
2846
2847
2848 void LIR_Assembler::peephole(LIR_List *lir) {
2849 #if 0
2850 if (tableswitch_count >= max_tableswitches)
2851 return;
2852
2853 /*
2854 This finite-state automaton recognizes sequences of compare-and-
2855 branch instructions. We will turn them into a tableswitch. You
2856 could argue that C1 really shouldn't be doing this sort of
2857 optimization, but without it the code is really horrible.
2858 */
2859
2860 enum { start_s, cmp1_s, beq_s, cmp_s } state;
2861 int first_key, last_key = -2147483648;
2862 int next_key = 0;
2863 int start_insn = -1;
2864 int last_insn = -1;
2865 Register reg = noreg;
2866 LIR_Opr reg_opr;
|
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "asm/macroAssembler.inline.hpp"
27 #include "asm/assembler.hpp"
28 #include "c1/c1_CodeStubs.hpp"
29 #include "c1/c1_Compilation.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_MacroAssembler.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "ci/ciArrayKlass.hpp"
35 #include "ci/ciInlineKlass.hpp"
36 #include "ci/ciInstance.hpp"
37 #include "ci/ciObjArrayKlass.hpp"
38 #include "code/aotCodeCache.hpp"
39 #include "code/compiledIC.hpp"
40 #include "gc/shared/collectedHeap.hpp"
41 #include "gc/shared/gc_globals.hpp"
42 #include "nativeInst_aarch64.hpp"
43 #include "oops/objArrayKlass.hpp"
44 #include "oops/oop.inline.hpp"
45 #include "runtime/frame.inline.hpp"
46 #include "runtime/sharedRuntime.hpp"
47 #include "runtime/stubRoutines.hpp"
48 #include "utilities/powerOfTwo.hpp"
49 #include "vmreg_aarch64.inline.hpp"
50
51
52 #ifndef PRODUCT
53 #define COMMENT(x) do { __ block_comment(x); } while (0)
54 #else
55 #define COMMENT(x)
56 #endif
57
58 NEEDS_CLEANUP // remove these definitions?
59 const Register SYNC_header = r0; // synchronization header
60 const Register SHIFT_count = r0; // where count for shift operations must be
61
62 #define __ _masm->
63
64
397 MonitorExitStub* stub = nullptr;
398 if (method()->is_synchronized()) {
399 monitor_address(0, FrameMap::r0_opr);
400 stub = new MonitorExitStub(FrameMap::r0_opr, 0);
401 __ unlock_object(r5, r4, r0, r6, *stub->entry());
402 __ bind(*stub->continuation());
403 }
404
405 if (compilation()->env()->dtrace_method_probes()) {
406 __ mov(c_rarg0, rthread);
407 __ mov_metadata(c_rarg1, method()->constant_encoding());
408 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
409 }
410
411 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
412 __ mov(r0, r19); // Restore the exception
413 }
414
415 // remove the activation and dispatch to the unwind handler
416 __ block_comment("remove_frame and dispatch to the unwind handler");
417 __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
418 __ far_jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_unwind_exception_id)));
419
420 // Emit the slow path assembly
421 if (stub != nullptr) {
422 stub->emit_code(this);
423 }
424
425 return offset;
426 }
427
428
429 int LIR_Assembler::emit_deopt_handler() {
430 // generate code for exception handler
431 address handler_base = __ start_a_stub(deopt_handler_size());
432 if (handler_base == nullptr) {
433 // not enough space left for the handler
434 bailout("deopt handler overflow");
435 return -1;
436 }
437
449 assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
450 "out of bounds read in post-call NOP check");
451 __ end_a_stub();
452
453 return entry_offset;
454 }
455
456 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
457 _masm->code_section()->relocate(adr, relocInfo::poll_type);
458 int pc_offset = code_offset();
459 flush_debug_info(pc_offset);
460 info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
461 if (info->exception_handlers() != nullptr) {
462 compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
463 }
464 }
465
// Emit the method epilogue: optionally scalarize an inline-type return
// value into registers, tear down the frame, perform the return-time
// safepoint poll, and return. Word-sized results are expected in r0.
void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");

  if (InlineTypeReturnedAsFields) {
    // Check if we are returning a non-null inline type and load its fields into registers
    ciType* return_type = compilation()->method()->return_type();
    if (return_type->is_inlinetype()) {
      ciInlineKlass* vk = return_type->as_inline_klass();
      if (vk->can_be_returned_as_fields()) {
        address unpack_handler = vk->unpack_handler();
        assert(unpack_handler != nullptr, "must be");
        __ far_call(RuntimeAddress(unpack_handler));
      }
    } else if (return_type->is_instance_klass() && (!return_type->is_loaded() || StressCallingConvention)) {
      Label skip;
      Label not_null;
      __ cbnz(r0, not_null);
      // Returned value is null, zero all return registers because they may belong to oop fields
      __ mov(j_rarg1, zr);
      __ mov(j_rarg2, zr);
      __ mov(j_rarg3, zr);
      __ mov(j_rarg4, zr);
      __ mov(j_rarg5, zr);
      __ mov(j_rarg6, zr);
      __ mov(j_rarg7, zr);
      __ b(skip);
      __ bind(not_null);

      // Check if we are returning a non-null inline type and load its fields into registers
      __ test_oop_is_not_inline_type(r0, rscratch2, skip, /* can_be_null= */ false);

      // Load fields from a buffered value with an inline class specific handler
      __ load_klass(rscratch1 /*dst*/, r0 /*src*/);
      __ ldr(rscratch1, Address(rscratch1, InlineKlass::adr_members_offset()));
      __ ldr(rscratch1, Address(rscratch1, InlineKlass::unpack_handler_offset()));
      // Unpack handler can be null if inline type is not scalarizable in returns
      __ cbz(rscratch1, skip);
      __ blr(rscratch1);

      __ bind(skip);
    }
    // At this point, r0 points to the value object (for interpreter or C1 caller).
    // The fields of the object are copied into registers (for C2 caller).
  }

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // Record where the poll instruction lands so the slow-path stub can
  // associate itself with this return site.
  code_stub->set_safepoint_offset(__ offset());
  __ relocate(relocInfo::poll_return_type);
  __ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
  __ ret(lr);
}
523
524 int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
525 return (__ store_inline_type_fields_to_buf(vk, false));
526 }
527
// Emit an in-method safepoint poll (load from the polling page) and
// return the code offset just past it. info supplies the oop map
// recorded for the poll site.
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  guarantee(info != nullptr, "Shouldn't be null");
  __ get_polling_page(rscratch1, relocInfo::poll_type);
  add_debug_info_for_branch(info); // This isn't just debug info:
                                   // it's the oop map
  __ read_polling_page(rscratch1, relocInfo::poll_type);
  return __ offset();
}
536
537
538 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
539 if (from_reg == r31_sp)
540 from_reg = sp;
541 if (to_reg == r31_sp)
542 to_reg = sp;
543 __ mov(to_reg, from_reg);
544 }
545
// Register swap is not needed by this port. // NOTE(review): intentionally unimplemented
void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
547
563 __ mov(dest->as_register(), c->as_jint());
564 break;
565 }
566
567 case T_LONG: {
568 assert(patch_code == lir_patch_none, "no patching handled here");
569 #if INCLUDE_CDS
570 if (AOTCodeCache::is_on_for_dump()) {
571 address b = c->as_pointer();
572 if (AOTRuntimeConstants::contains(b)) {
573 __ load_aotrc_address(dest->as_register_lo(), b);
574 break;
575 }
576 }
577 #endif
578 __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
579 break;
580 }
581
582 case T_OBJECT: {
583 if (patch_code != lir_patch_none) {
584 jobject2reg_with_patching(dest->as_register(), info);
585 } else {
586 jobject2reg(c->as_jobject(), dest->as_register());
587 }
588 break;
589 }
590
591 case T_METADATA: {
592 if (patch_code != lir_patch_none) {
593 klass2reg_with_patching(dest->as_register(), info);
594 } else {
595 __ mov_metadata(dest->as_register(), c->as_metadata());
596 }
597 break;
598 }
599
600 case T_FLOAT: {
601 if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
602 __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
603 } else {
604 __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
605 __ ldrs(dest->as_float_reg(), Address(rscratch1));
606 }
676 LIR_Const* c = src->as_constant_ptr();
677 LIR_Address* to_addr = dest->as_address_ptr();
678
679 void (Assembler::* insn)(Register Rt, const Address &adr);
680
681 switch (type) {
682 case T_ADDRESS:
683 assert(c->as_jint() == 0, "should be");
684 insn = &Assembler::str;
685 break;
686 case T_LONG:
687 assert(c->as_jlong() == 0, "should be");
688 insn = &Assembler::str;
689 break;
690 case T_INT:
691 assert(c->as_jint() == 0, "should be");
692 insn = &Assembler::strw;
693 break;
694 case T_OBJECT:
695 case T_ARRAY:
696 // Non-null case is not handled on aarch64 but handled on x86
697 // FIXME: do we need to add it here?
698 assert(c->as_jobject() == nullptr, "should be");
699 if (UseCompressedOops && !wide) {
700 insn = &Assembler::strw;
701 } else {
702 insn = &Assembler::str;
703 }
704 break;
705 case T_CHAR:
706 case T_SHORT:
707 assert(c->as_jint() == 0, "should be");
708 insn = &Assembler::strh;
709 break;
710 case T_BOOLEAN:
711 case T_BYTE:
712 assert(c->as_jint() == 0, "should be");
713 insn = &Assembler::strb;
714 break;
715 default:
716 ShouldNotReachHere();
717 insn = &Assembler::str; // unreachable
1025 case T_CHAR:
1026 __ ldrh(dest->as_register(), as_Address(from_addr));
1027 break;
1028 case T_SHORT:
1029 __ ldrsh(dest->as_register(), as_Address(from_addr));
1030 break;
1031
1032 default:
1033 ShouldNotReachHere();
1034 }
1035
1036 if (is_reference_type(type)) {
1037 if (UseCompressedOops && !wide) {
1038 __ decode_heap_oop(dest->as_register());
1039 }
1040
1041 __ verify_oop(dest->as_register());
1042 }
1043 }
1044
1045 void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
1046 assert(dst->is_cpu_register(), "must be");
1047 assert(dst->type() == src->type(), "must be");
1048
1049 if (src->is_cpu_register()) {
1050 reg2reg(src, dst);
1051 } else if (src->is_stack()) {
1052 stack2reg(src, dst, dst->type());
1053 } else if (src->is_constant()) {
1054 const2reg(src, dst, lir_patch_none, nullptr);
1055 } else {
1056 ShouldNotReachHere();
1057 }
1058 }
1059
1060 int LIR_Assembler::array_element_size(BasicType type) const {
1061 int elem_size = type2aelembytes(type);
1062 return exact_log2(elem_size);
1063 }
1064
1065
1066 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1067 switch (op->code()) {
1068 case lir_idiv:
1069 case lir_irem:
1070 arithmetic_idiv(op->code(),
1071 op->in_opr1(),
1072 op->in_opr2(),
1073 op->in_opr3(),
1074 op->result_opr(),
1075 op->info());
1076 break;
1077 case lir_fmad:
1078 __ fmaddd(op->result_opr()->as_double_reg(),
1230 __ lea(rscratch1, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
1231 __ ldarb(rscratch1, rscratch1);
1232 __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1233 add_debug_info_for_null_check_here(op->stub()->info());
1234 __ br(Assembler::NE, *op->stub()->entry());
1235 }
1236 __ allocate_object(op->obj()->as_register(),
1237 op->tmp1()->as_register(),
1238 op->tmp2()->as_register(),
1239 op->header_size(),
1240 op->object_size(),
1241 op->klass()->as_register(),
1242 *op->stub()->entry());
1243 __ bind(*op->stub()->continuation());
1244 }
1245
1246 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1247 Register len = op->len()->as_register();
1248 __ uxtw(len, len);
1249
1250 if (UseSlowPath || op->always_slow_path() ||
1251 (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1252 (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1253 __ b(*op->stub()->entry());
1254 } else {
1255 Register tmp1 = op->tmp1()->as_register();
1256 Register tmp2 = op->tmp2()->as_register();
1257 Register tmp3 = op->tmp3()->as_register();
1258 if (len == tmp1) {
1259 tmp1 = tmp3;
1260 } else if (len == tmp2) {
1261 tmp2 = tmp3;
1262 } else if (len == tmp3) {
1263 // everything is ok
1264 } else {
1265 __ mov(tmp3, len);
1266 }
1267 __ allocate_array(op->obj()->as_register(),
1268 len,
1269 tmp1,
1270 tmp2,
1307 md = method->method_data_or_null();
1308 assert(md != nullptr, "Sanity");
1309 data = md->bci_to_data(bci);
1310 assert(data != nullptr, "need data for type check");
1311 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1312 }
1313 Label* success_target = success;
1314 Label* failure_target = failure;
1315
1316 if (obj == k_RInfo) {
1317 k_RInfo = dst;
1318 } else if (obj == klass_RInfo) {
1319 klass_RInfo = dst;
1320 }
1321
1322 Rtmp1 = op->tmp3()->as_register();
1323 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1324
1325 assert_different_registers(obj, k_RInfo, klass_RInfo);
1326
1327 if (op->need_null_check()) {
1328 if (should_profile) {
1329 Register mdo = klass_RInfo;
1330 __ mov_metadata(mdo, md->constant_encoding());
1331 Label not_null;
1332 __ cbnz(obj, not_null);
1333 // Object is null; update MDO and exit
1334 Address data_addr
1335 = __ form_address(rscratch2, mdo,
1336 md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1337 0);
1338 __ ldrb(rscratch1, data_addr);
1339 __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1340 __ strb(rscratch1, data_addr);
1341 __ b(*obj_is_null);
1342 __ bind(not_null);
1343
1344 Register recv = k_RInfo;
1345 __ load_klass(recv, obj);
1346 type_profile_helper(mdo, md, data, recv);
1347 } else {
1348 __ cbz(obj, *obj_is_null);
1349 }
1350 }
1351
1352 if (!k->is_loaded()) {
1353 klass2reg_with_patching(k_RInfo, op->info_for_patch());
1354 } else {
1355 __ mov_metadata(k_RInfo, k->constant_encoding());
1356 }
1357 __ verify_oop(obj);
1358
1359 if (op->fast_check()) {
1360 assert(!k->is_loaded() || !k->is_obj_array_klass(), "Use refined array for a direct pointer comparison");
1361 // get object class
1362 // not a safepoint as obj null check happens earlier
1363 __ load_klass(rscratch1, obj);
1364 __ cmp( rscratch1, k_RInfo);
1365
1366 __ br(Assembler::NE, *failure_target);
1367 // successful cast, fall through to profile or jump
1368 } else {
1369 // get object class
1370 // not a safepoint as obj null check happens earlier
1371 __ load_klass(klass_RInfo, obj);
1372 if (k->is_loaded()) {
1373 // See if we get an immediate positive hit
1374 __ ldr(rscratch1, Address(klass_RInfo, int64_t(k->super_check_offset())));
1375 __ cmp(k_RInfo, rscratch1);
1376 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1377 __ br(Assembler::NE, *failure_target);
1378 // successful cast, fall through to profile or jump
1379 } else {
1380 // See if we get an immediate positive hit
1381 __ br(Assembler::EQ, *success_target);
1382 // check for self
1383 if (k->is_loaded() && k->is_obj_array_klass()) {
1384 // For a direct pointer comparison, we need the refined array klass pointer
1385 ciKlass* k_refined = ciObjArrayKlass::make(k->as_obj_array_klass()->element_klass());
1386 __ mov_metadata(rscratch1, k_refined->constant_encoding());
1387 __ cmp(klass_RInfo, rscratch1);
1388 } else {
1389 __ cmp(klass_RInfo, k_RInfo);
1390 }
1391 __ br(Assembler::EQ, *success_target);
1392
1393 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1394 __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1395 __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1396 // result is a boolean
1397 __ cbzw(klass_RInfo, *failure_target);
1398 // successful cast, fall through to profile or jump
1399 }
1400 } else {
1401 // perform the fast part of the checking logic
1402 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
1403 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1404 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1405 __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1406 __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1407 // result is a boolean
1408 __ cbz(k_RInfo, *failure_target);
1409 // successful cast, fall through to profile or jump
1410 }
1491 __ bind(success);
1492 if (dst != obj) {
1493 __ mov(dst, obj);
1494 }
1495 } else if (code == lir_instanceof) {
1496 Register obj = op->object()->as_register();
1497 Register dst = op->result_opr()->as_register();
1498 Label success, failure, done;
1499 emit_typecheck_helper(op, &success, &failure, &failure);
1500 __ bind(failure);
1501 __ mov(dst, zr);
1502 __ b(done);
1503 __ bind(success);
1504 __ mov(dst, 1);
1505 __ bind(done);
1506 } else {
1507 ShouldNotReachHere();
1508 }
1509 }
1510
// Runtime guard for accesses to an array that *may* be flat. Branches to the
// slow-path stub when the array turns out to be flat, and (for stores) when a
// null is about to be written into a possibly null-free array.
void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
  // We are loading/storing from/to an array that *may* be a flat array (the
  // declared type is Object[], abstract[], interface[] or VT.ref[]).
  // If this array is a flat array, take the slow path.
  __ test_flat_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
  // op->value() is illegal for loads; only stores carry a value to check.
  if (!op->value()->is_illegal()) {
    // The array is not a flat array, but it might be null-free. If we are storing
    // a null into a null-free array, take the slow path (which will throw NPE).
    Label skip;
    __ cbnz(op->value()->as_register(), skip);
    __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
    __ bind(skip);
  }
}
1525
// Test whether an array that *may* be null-free actually is, by inspecting
// the null-free bit of its mark word. Leaves the result in the condition
// flags (tst of null_free_array_bit_in_place) for a following LIR branch to
// consume — NOTE(review): no branch is emitted here; confirm against callers.
void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
  // We are storing into an array that *may* be null-free (the declared type is
  // Object[], abstract[], interface[] or VT.ref[]).
  Label test_mark_word;
  Register tmp = op->tmp()->as_register();
  // Load the mark word; if the unlocked bit is set the mark word itself holds
  // the array-property bits, otherwise fall back to the klass prototype header.
  __ ldr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
  __ tst(tmp, markWord::unlocked_value);
  __ br(Assembler::NE, test_mark_word);
  __ load_prototype_header(tmp, op->array()->as_register());
  __ bind(test_mark_word);
  __ tst(tmp, markWord::null_free_array_bit_in_place);
}
1538
// Decide whether two references are substitutable (inline-type-aware
// equality). Fast paths handle reference equality, nulls, non-inline-type
// operands and differing klasses inline; only same-klass inline types fall
// into the slow-path stub, which performs the deep comparison and reports
// its verdict in r0. The result operand is set from op->equal_result() or
// op->not_equal_result().
void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
  Label L_oops_equal;
  Label L_oops_not_equal;
  Label L_end;

  Register left = op->left()->as_register();
  Register right = op->right()->as_register();

  // Same reference -> trivially substitutable.
  __ cmp(left, right);
  __ br(Assembler::EQ, L_oops_equal);

  // (1) Null check -- if one of the operands is null, the other must not be null (because
  //     the two references are not equal), so they are not substitutable,
  //     FIXME: do null check only if the operand is nullable
  {
    __ cbz(left, L_oops_not_equal);
    __ cbz(right, L_oops_not_equal);
  }

  ciKlass* left_klass = op->left_klass();
  ciKlass* right_klass = op->right_klass();

  // (2) Inline type check -- if either of the operands is not a inline type,
  //     they are not substitutable. We do this only if we are not sure that the
  //     operands are inline type
  if ((left_klass == nullptr || right_klass == nullptr) ||// The klass is still unloaded, or came from a Phi node.
      !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
    Register tmp1  = op->tmp1()->as_register();
    Register tmp2  = op->tmp2()->as_register();
    // AND the two mark words together: the inline-type pattern bits survive
    // only if they are set in both operands' mark words.
    __ mov(tmp1, markWord::inline_type_pattern);
    __ ldr(tmp2, Address(left, oopDesc::mark_offset_in_bytes()));
    __ andr(tmp1, tmp1, tmp2);
    __ ldr(tmp2, Address(right, oopDesc::mark_offset_in_bytes()));
    __ andr(tmp1, tmp1, tmp2);
    __ cmp(tmp1, (u1)markWord::inline_type_pattern);
    __ br(Assembler::NE, L_oops_not_equal);
  }

  // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
  if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) {
    // No need to load klass -- the operands are statically known to be the same inline klass.
    __ b(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    __ cmp_klasses_from_objects(left, right, tmp1, tmp2);
    __ br(Assembler::EQ, *op->stub()->entry()); // same klass -> do slow check
    // fall through to L_oops_not_equal
  }

  __ bind(L_oops_not_equal);
  move(op->not_equal_result(), op->result_opr());
  __ b(L_end);

  __ bind(L_oops_equal);
  move(op->equal_result(), op->result_opr());
  __ b(L_end);

  // We've returned from the stub. R0 contains 0x0 IFF the two
  // operands are not substitutable. (Don't compare against 0x1 in case the
  // C compiler is naughty)
  __ bind(*op->stub()->continuation());
  __ cbz(r0, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
  move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
  // fall-through
  __ bind(L_end);
}
1606
1607
// 32-bit compare-and-swap at [addr]: store newval iff the current value
// equals cmpval. rscratch1 is set to 0 on success and 1 on failure (cset on
// NE). A full AnyAny barrier follows the acquire/release exchange.
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}
1613
// 64-bit compare-and-swap at [addr]: store newval iff the current value
// equals cmpval. rscratch1 is set to 0 on success and 1 on failure (cset on
// NE). A full AnyAny barrier follows the acquire/release exchange.
void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}
1619
1620
1621 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1622 Register addr;
1623 if (op->addr()->is_register()) {
1624 addr = as_reg(op->addr());
1625 } else {
1626 assert(op->addr()->is_address(), "what else?");
1627 LIR_Address* addr_ptr = op->addr()->as_address_ptr();
2101 __ cmp(left->as_register_lo(), right->as_register_lo());
2102 __ mov(dst->as_register(), (uint64_t)-1L);
2103 __ br(Assembler::LT, done);
2104 __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
2105 __ bind(done);
2106 } else {
2107 ShouldNotReachHere();
2108 }
2109 }
2110
2111
2112 void LIR_Assembler::align_call(LIR_Code code) { }
2113
2114
// Emit a direct Java call through a trampoline. Bails out the compilation if
// the trampoline stub cannot be emitted (stub section overflow), records
// debug/oop-map info at the call's return address, and emits the post-call
// nop expected after Java calls.
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  address call = __ trampoline_call(Address(op->addr(), rtype));
  if (call == nullptr) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
  __ post_call_nop();
}
2124
2125
// Emit a virtual (inline-cache) Java call. Bails out the compilation if the
// required stub cannot be emitted, records debug/oop-map info at the return
// address, and emits the post-call nop.
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  address call = __ ic_call(op->addr());
  if (call == nullptr) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
  __ post_call_nop();
}
2135
// Emit the out-of-line stub associated with the static call at the current
// pc. The stub is linked to its call site via a static_stub_Relocation; the
// compilation bails out if the stub section is full.
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == nullptr) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  // Tie this stub to the preceding call instruction, then emit its body.
  __ relocate(static_stub_Relocation::spec(call_pc));
  __ emit_static_call_stub();

  // The budget must also cover the trampoline the stub may need.
  assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
        <= call_stub_size(), "stub too big");
  __ end_a_stub();
}
2275
2276
// Store the constant c into an outgoing-argument slot in the reserved
// argument area at the bottom of the current frame (offset counted in
// words from sp). Clobbers rscratch1.
void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ mov (rscratch1, c);
  __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
}
2284
2285
// Store the oop constant o into an outgoing-argument slot on the stack.
// Not expected to be reached on this platform: ShouldNotReachHere() aborts
// first, so the code below it is effectively dead (kept to mirror the x86
// counterpart — NOTE(review): confirm whether it can be removed).
void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
  ShouldNotReachHere();
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ lea(rscratch1, __ constant_oop_address(o));
  __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
}
2294
2295 void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) {
2296 if (null_check) {
2297 __ cbz(obj, *slow_path->entry());
2298 }
2299 if (is_dest) {
2300 __ test_null_free_array_oop(obj, tmp, *slow_path->entry());
2301 // TODO 8350865 Flat no longer implies null-free, so we need to check for flat dest. Can we do better here?
2302 __ test_flat_array_oop(obj, tmp, *slow_path->entry());
2303 } else {
2304 __ test_flat_array_oop(obj, tmp, *slow_path->entry());
2305 }
2306 }
2307
2308 // This code replaces a call to arraycopy; no exception may
2309 // be thrown in this code, they must be thrown in the System.arraycopy
2310 // activation frame; we could save some checks if this would not be the case
2311 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2312 ciArrayKlass* default_type = op->expected_type();
2313 Register src = op->src()->as_register();
2314 Register dst = op->dst()->as_register();
2315 Register src_pos = op->src_pos()->as_register();
2316 Register dst_pos = op->dst_pos()->as_register();
2317 Register length = op->length()->as_register();
2318 Register tmp = op->tmp()->as_register();
2319
2320 CodeStub* stub = op->stub();
2321 int flags = op->flags();
2322 BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
2323 if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2324
2325 if (flags & LIR_OpArrayCopy::always_slow_path) {
2326 __ b(*stub->entry());
2327 __ bind(*stub->continuation());
2328 return;
2329 }
2330
2331 // if we don't know anything, just go through the generic arraycopy
2332 if (default_type == nullptr // || basic_type == T_OBJECT
2333 ) {
2334 Label done;
2335 assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2336
2337 // Save the arguments in case the generic arraycopy fails and we
2338 // have to fall back to the JNI stub
2339 __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2340 __ stp(length, src_pos, Address(sp, 2*BytesPerWord));
2341 __ str(src, Address(sp, 4*BytesPerWord));
2342
2343 address copyfunc_addr = StubRoutines::generic_arraycopy();
2344 assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2345
2346 // The arguments are in java calling convention so we shift them
2347 // to C convention
2348 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2349 __ mov(c_rarg0, j_rarg0);
2350 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2364 __ cbz(r0, *stub->continuation());
2365
2366 // Reload values from the stack so they are where the stub
2367 // expects them.
2368 __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2369 __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
2370 __ ldr(src, Address(sp, 4*BytesPerWord));
2371
2372 // r0 is -1^K where K == partial copied count
2373 __ eonw(rscratch1, r0, zr);
2374 // adjust length down and src/end pos up by partial copied count
2375 __ subw(length, length, rscratch1);
2376 __ addw(src_pos, src_pos, rscratch1);
2377 __ addw(dst_pos, dst_pos, rscratch1);
2378 __ b(*stub->entry());
2379
2380 __ bind(*stub->continuation());
2381 return;
2382 }
2383
2384 // Handle inline type arrays
2385 if (flags & LIR_OpArrayCopy::src_inlinetype_check) {
2386 arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
2387 }
2388 if (flags & LIR_OpArrayCopy::dst_inlinetype_check) {
2389 arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
2390 }
2391
2392 assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2393
2394 int elem_size = type2aelembytes(basic_type);
2395 int scale = exact_log2(elem_size);
2396
2397 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2398 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2399
2400 // test for null
2401 if (flags & LIR_OpArrayCopy::src_null_check) {
2402 __ cbz(src, *stub->entry());
2403 }
2404 if (flags & LIR_OpArrayCopy::dst_null_check) {
2405 __ cbz(dst, *stub->entry());
2406 }
2407
2408 // If the compiler was not able to prove that exact type of the source or the destination
2409 // of the arraycopy is an array type, check at runtime if the source or the destination is
2410 // an instance type.
2411 if (flags & LIR_OpArrayCopy::type_check) {
2886 __ verify_klass_ptr(tmp);
2887 #endif
2888 } else {
2889 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2890 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2891
2892 __ ldr(tmp, mdo_addr);
2893 __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2894
2895 __ orr(tmp, tmp, TypeEntries::type_unknown);
2896 __ str(tmp, mdo_addr);
2897 // FIXME: Write barrier needed here?
2898 }
2899 }
2900
2901 __ bind(next);
2902 }
2903 COMMENT("} emit_profile_type");
2904 }
2905
// Profile whether the observed value is an inline type: if obj is non-null
// and is an inline type, OR op->flag() into the profile byte at op->mdp().
// Null values and non-inline types skip the update entirely.
void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  bool not_null = op->not_null();   // compiler already proved obj != null
  int flag = op->flag();

  Label not_inline_type;
  if (!not_null) {
    __ cbz(obj, not_inline_type);
  }

  __ test_oop_is_not_inline_type(obj, tmp, not_inline_type);

  // Read-modify-write the flag byte in the method data.
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr(), rscratch2);
  __ ldrb(rscratch1, mdo_addr);
  __ orr(rscratch1, rscratch1, flag);
  __ strb(rscratch1, mdo_addr);

  __ bind(not_inline_type);
}
2926
// Intentionally a no-op: backward branch targets get no extra alignment on aarch64.
void LIR_Assembler::align_backward_branch_target() {
}
2929
2930
2931 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2932 // tmp must be unused
2933 assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2934
2935 if (left->is_single_cpu()) {
2936 assert(dest->is_single_cpu(), "expect single result reg");
2937 __ negw(dest->as_register(), left->as_register());
2938 } else if (left->is_double_cpu()) {
2939 assert(dest->is_double_cpu(), "expect double result reg");
2940 __ neg(dest->as_register_lo(), left->as_register_lo());
2941 } else if (left->is_single_fpu()) {
2942 assert(dest->is_single_fpu(), "expect single float result reg");
2943 __ fnegs(dest->as_float_reg(), left->as_float_reg());
2944 } else {
2945 assert(left->is_double_fpu(), "expect double float operand reg");
3045 void LIR_Assembler::membar_loadload() {
3046 __ membar(Assembler::LoadLoad);
3047 }
3048
3049 void LIR_Assembler::membar_storestore() {
3050 __ membar(MacroAssembler::StoreStore);
3051 }
3052
3053 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
3054
3055 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
3056
// Emit the platform spin-wait hint (backs Thread.onSpinWait()).
void LIR_Assembler::on_spin_wait() {
  __ spin_wait();
}
3060
// Copy the current JavaThread pointer (kept in rthread) into the result register.
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  __ mov(result_reg->as_register(), rthread);
}
3064
// Load the saved original PC slot of the current frame and compare it with
// NULL_WORD, leaving the verdict in the condition flags (EQ iff the slot is
// still zero) for a following branch to consume. Clobbers rscratch2.
void LIR_Assembler::check_orig_pc() {
  __ ldr(rscratch2, frame_map()->address_for_orig_pc_addr());
  __ cmp(rscratch2, (u1)NULL_WORD);
}
3069
3070 void LIR_Assembler::peephole(LIR_List *lir) {
3071 #if 0
3072 if (tableswitch_count >= max_tableswitches)
3073 return;
3074
3075 /*
3076 This finite-state automaton recognizes sequences of compare-and-
3077 branch instructions. We will turn them into a tableswitch. You
3078 could argue that C1 really shouldn't be doing this sort of
3079 optimization, but without it the code is really horrible.
3080 */
3081
3082 enum { start_s, cmp1_s, beq_s, cmp_s } state;
3083 int first_key, last_key = -2147483648;
3084 int next_key = 0;
3085 int start_insn = -1;
3086 int last_insn = -1;
3087 Register reg = noreg;
3088 LIR_Opr reg_opr;
|