15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "asm/macroAssembler.inline.hpp"
27 #include "asm/assembler.hpp"
28 #include "c1/c1_CodeStubs.hpp"
29 #include "c1/c1_Compilation.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_MacroAssembler.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "ci/ciArrayKlass.hpp"
35 #include "ci/ciInstance.hpp"
36 #include "code/compiledIC.hpp"
37 #include "gc/shared/collectedHeap.hpp"
38 #include "gc/shared/gc_globals.hpp"
39 #include "nativeInst_aarch64.hpp"
40 #include "oops/objArrayKlass.hpp"
41 #include "runtime/frame.inline.hpp"
42 #include "runtime/sharedRuntime.hpp"
43 #include "runtime/stubRoutines.hpp"
44 #include "utilities/powerOfTwo.hpp"
45 #include "vmreg_aarch64.inline.hpp"
46
47
48 #ifndef PRODUCT
49 #define COMMENT(x) do { __ block_comment(x); } while (0)
50 #else
51 #define COMMENT(x)
52 #endif
53
NEEDS_CLEANUP // remove these definitions?
const Register SYNC_header = r0; // synchronization header
const Register SHIFT_count = r0; // where count for shift operations must be
57
58 #define __ _masm->
59
60
409 MonitorExitStub* stub = nullptr;
410 if (method()->is_synchronized()) {
411 monitor_address(0, FrameMap::r0_opr);
412 stub = new MonitorExitStub(FrameMap::r0_opr, 0);
413 __ unlock_object(r5, r4, r0, r6, *stub->entry());
414 __ bind(*stub->continuation());
415 }
416
417 if (compilation()->env()->dtrace_method_probes()) {
418 __ mov(c_rarg0, rthread);
419 __ mov_metadata(c_rarg1, method()->constant_encoding());
420 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
421 }
422
423 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
424 __ mov(r0, r19); // Restore the exception
425 }
426
427 // remove the activation and dispatch to the unwind handler
428 __ block_comment("remove_frame and dispatch to the unwind handler");
429 __ remove_frame(initial_frame_size_in_bytes());
430 __ far_jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_unwind_exception_id)));
431
432 // Emit the slow path assembly
433 if (stub != nullptr) {
434 stub->emit_code(this);
435 }
436
437 return offset;
438 }
439
440
441 int LIR_Assembler::emit_deopt_handler() {
442 // generate code for exception handler
443 address handler_base = __ start_a_stub(deopt_handler_size());
444 if (handler_base == nullptr) {
445 // not enough space left for the handler
446 bailout("deopt handler overflow");
447 return -1;
448 }
449
461 assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
462 "out of bounds read in post-call NOP check");
463 __ end_a_stub();
464
465 return entry_offset;
466 }
467
// Attach debug information (including the oop map) to a poll branch at
// address 'adr'.  The relocation marks the branch as a safepoint poll;
// the debug info is recorded at the current code offset.
void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
  // Mark 'adr' as a poll-type site in the current code section.
  _masm->code_section()->relocate(adr, relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  // Record scope/oop-map debug info keyed by the current pc offset.
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != nullptr) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}
477
// Emit the method return sequence: tear down the frame, poll for a
// safepoint, and return.  A word-sized 'result' (if any) is already in
// r0; the poll's slow path is handled by 'code_stub'.
void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // Record where the poll instruction lands so the stub can locate it.
  code_stub->set_safepoint_offset(__ offset());
  __ relocate(relocInfo::poll_return_type);
  __ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
  __ ret(lr);
}
493
// Emit a safepoint poll (a read of the polling page) and return the code
// offset just past the poll instruction.  'tmp' is unused on AArch64;
// rscratch1 holds the polling-page address.
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  guarantee(info != nullptr, "Shouldn't be null");
  __ get_polling_page(rscratch1, relocInfo::poll_type);
  add_debug_info_for_branch(info); // This isn't just debug info:
                                   // it's the oop map
  __ read_polling_page(rscratch1, relocInfo::poll_type);
  return __ offset();
}
502
503
504 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
505 if (from_reg == r31_sp)
506 from_reg = sp;
507 if (to_reg == r31_sp)
508 to_reg = sp;
509 __ mov(to_reg, from_reg);
510 }
511
// Not used on AArch64; aborts via Unimplemented() if ever reached.
void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
513
520 switch (c->type()) {
521 case T_INT: {
522 assert(patch_code == lir_patch_none, "no patching handled here");
523 __ movw(dest->as_register(), c->as_jint());
524 break;
525 }
526
527 case T_ADDRESS: {
528 assert(patch_code == lir_patch_none, "no patching handled here");
529 __ mov(dest->as_register(), c->as_jint());
530 break;
531 }
532
533 case T_LONG: {
534 assert(patch_code == lir_patch_none, "no patching handled here");
535 __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
536 break;
537 }
538
539 case T_OBJECT: {
540 if (patch_code == lir_patch_none) {
541 jobject2reg(c->as_jobject(), dest->as_register());
542 } else {
543 jobject2reg_with_patching(dest->as_register(), info);
544 }
545 break;
546 }
547
548 case T_METADATA: {
549 if (patch_code != lir_patch_none) {
550 klass2reg_with_patching(dest->as_register(), info);
551 } else {
552 __ mov_metadata(dest->as_register(), c->as_metadata());
553 }
554 break;
555 }
556
557 case T_FLOAT: {
558 if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
559 __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
560 } else {
561 __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
562 __ ldrs(dest->as_float_reg(), Address(rscratch1));
563 }
633 LIR_Const* c = src->as_constant_ptr();
634 LIR_Address* to_addr = dest->as_address_ptr();
635
636 void (Assembler::* insn)(Register Rt, const Address &adr);
637
638 switch (type) {
639 case T_ADDRESS:
640 assert(c->as_jint() == 0, "should be");
641 insn = &Assembler::str;
642 break;
643 case T_LONG:
644 assert(c->as_jlong() == 0, "should be");
645 insn = &Assembler::str;
646 break;
647 case T_INT:
648 assert(c->as_jint() == 0, "should be");
649 insn = &Assembler::strw;
650 break;
651 case T_OBJECT:
652 case T_ARRAY:
653 assert(c->as_jobject() == nullptr, "should be");
654 if (UseCompressedOops && !wide) {
655 insn = &Assembler::strw;
656 } else {
657 insn = &Assembler::str;
658 }
659 break;
660 case T_CHAR:
661 case T_SHORT:
662 assert(c->as_jint() == 0, "should be");
663 insn = &Assembler::strh;
664 break;
665 case T_BOOLEAN:
666 case T_BYTE:
667 assert(c->as_jint() == 0, "should be");
668 insn = &Assembler::strb;
669 break;
670 default:
671 ShouldNotReachHere();
672 insn = &Assembler::str; // unreachable
980 case T_CHAR:
981 __ ldrh(dest->as_register(), as_Address(from_addr));
982 break;
983 case T_SHORT:
984 __ ldrsh(dest->as_register(), as_Address(from_addr));
985 break;
986
987 default:
988 ShouldNotReachHere();
989 }
990
991 if (is_reference_type(type)) {
992 if (UseCompressedOops && !wide) {
993 __ decode_heap_oop(dest->as_register());
994 }
995
996 __ verify_oop(dest->as_register());
997 }
998 }
999
1000
1001 int LIR_Assembler::array_element_size(BasicType type) const {
1002 int elem_size = type2aelembytes(type);
1003 return exact_log2(elem_size);
1004 }
1005
1006
1007 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1008 switch (op->code()) {
1009 case lir_idiv:
1010 case lir_irem:
1011 arithmetic_idiv(op->code(),
1012 op->in_opr1(),
1013 op->in_opr2(),
1014 op->in_opr3(),
1015 op->result_opr(),
1016 op->info());
1017 break;
1018 case lir_fmad:
1019 __ fmaddd(op->result_opr()->as_double_reg(),
1171 __ lea(rscratch1, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
1172 __ ldarb(rscratch1, rscratch1);
1173 __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1174 add_debug_info_for_null_check_here(op->stub()->info());
1175 __ br(Assembler::NE, *op->stub()->entry());
1176 }
1177 __ allocate_object(op->obj()->as_register(),
1178 op->tmp1()->as_register(),
1179 op->tmp2()->as_register(),
1180 op->header_size(),
1181 op->object_size(),
1182 op->klass()->as_register(),
1183 *op->stub()->entry());
1184 __ bind(*op->stub()->continuation());
1185 }
1186
1187 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1188 Register len = op->len()->as_register();
1189 __ uxtw(len, len);
1190
1191 if (UseSlowPath ||
1192 (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1193 (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1194 __ b(*op->stub()->entry());
1195 } else {
1196 Register tmp1 = op->tmp1()->as_register();
1197 Register tmp2 = op->tmp2()->as_register();
1198 Register tmp3 = op->tmp3()->as_register();
1199 if (len == tmp1) {
1200 tmp1 = tmp3;
1201 } else if (len == tmp2) {
1202 tmp2 = tmp3;
1203 } else if (len == tmp3) {
1204 // everything is ok
1205 } else {
1206 __ mov(tmp3, len);
1207 }
1208 __ allocate_array(op->obj()->as_register(),
1209 len,
1210 tmp1,
1211 tmp2,
1251 assert(data != nullptr, "need data for type check");
1252 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1253 }
1254 Label* success_target = success;
1255 Label* failure_target = failure;
1256
1257 if (obj == k_RInfo) {
1258 k_RInfo = dst;
1259 } else if (obj == klass_RInfo) {
1260 klass_RInfo = dst;
1261 }
1262 if (k->is_loaded() && !UseCompressedClassPointers) {
1263 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1264 } else {
1265 Rtmp1 = op->tmp3()->as_register();
1266 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1267 }
1268
1269 assert_different_registers(obj, k_RInfo, klass_RInfo);
1270
1271 if (should_profile) {
1272 Register mdo = klass_RInfo;
1273 __ mov_metadata(mdo, md->constant_encoding());
1274 Label not_null;
1275 __ cbnz(obj, not_null);
1276 // Object is null; update MDO and exit
1277 Address data_addr
1278 = __ form_address(rscratch2, mdo,
1279 md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1280 0);
1281 __ ldrb(rscratch1, data_addr);
1282 __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1283 __ strb(rscratch1, data_addr);
1284 __ b(*obj_is_null);
1285 __ bind(not_null);
1286
1287 Register recv = k_RInfo;
1288 __ load_klass(recv, obj);
1289 type_profile_helper(mdo, md, data, recv);
1290 } else {
1291 __ cbz(obj, *obj_is_null);
1292 }
1293
1294 if (!k->is_loaded()) {
1295 klass2reg_with_patching(k_RInfo, op->info_for_patch());
1296 } else {
1297 __ mov_metadata(k_RInfo, k->constant_encoding());
1298 }
1299 __ verify_oop(obj);
1300
1301 if (op->fast_check()) {
1302 // get object class
1303 // not a safepoint as obj null check happens earlier
1304 __ load_klass(rscratch1, obj);
1305 __ cmp( rscratch1, k_RInfo);
1306
1307 __ br(Assembler::NE, *failure_target);
1308 // successful cast, fall through to profile or jump
1309 } else {
1310 // get object class
1311 // not a safepoint as obj null check happens earlier
1312 __ load_klass(klass_RInfo, obj);
1313 if (k->is_loaded()) {
1314 // See if we get an immediate positive hit
1315 __ ldr(rscratch1, Address(klass_RInfo, int64_t(k->super_check_offset())));
1316 __ cmp(k_RInfo, rscratch1);
1317 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1318 __ br(Assembler::NE, *failure_target);
1319 // successful cast, fall through to profile or jump
1320 } else {
1321 // See if we get an immediate positive hit
1322 __ br(Assembler::EQ, *success_target);
1323 // check for self
1324 __ cmp(klass_RInfo, k_RInfo);
1325 __ br(Assembler::EQ, *success_target);
1326
1327 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1328 __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1329 __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1330 // result is a boolean
1331 __ cbzw(klass_RInfo, *failure_target);
1332 // successful cast, fall through to profile or jump
1333 }
1334 } else {
1335 // perform the fast part of the checking logic
1336 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
1337 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1338 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1339 __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1340 __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1341 // result is a boolean
1342 __ cbz(k_RInfo, *failure_target);
1343 // successful cast, fall through to profile or jump
1344 }
1425 __ bind(success);
1426 if (dst != obj) {
1427 __ mov(dst, obj);
1428 }
1429 } else if (code == lir_instanceof) {
1430 Register obj = op->object()->as_register();
1431 Register dst = op->result_opr()->as_register();
1432 Label success, failure, done;
1433 emit_typecheck_helper(op, &success, &failure, &failure);
1434 __ bind(failure);
1435 __ mov(dst, zr);
1436 __ b(done);
1437 __ bind(success);
1438 __ mov(dst, 1);
1439 __ bind(done);
1440 } else {
1441 ShouldNotReachHere();
1442 }
1443 }
1444
// 32-bit compare-and-swap: *addr is replaced by newval iff it equals
// cmpval.  cset materializes the condition into rscratch1 (1 on NE —
// presumably the failure case of the cmpxchg; confirm against
// MacroAssembler::cmpxchg), and a full barrier orders the access.
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}
1450
// 64-bit compare-and-swap (xword variant of casw above): *addr is
// replaced by newval iff it equals cmpval; rscratch1 receives the NE
// condition and a full barrier orders the access.
void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}
1456
1457
1458 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1459 Register addr;
1460 if (op->addr()->is_register()) {
1461 addr = as_reg(op->addr());
1462 } else {
1463 assert(op->addr()->is_address(), "what else?");
1464 LIR_Address* addr_ptr = op->addr()->as_address_ptr();
1938 __ cmp(left->as_register_lo(), right->as_register_lo());
1939 __ mov(dst->as_register(), (uint64_t)-1L);
1940 __ br(Assembler::LT, done);
1941 __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
1942 __ bind(done);
1943 } else {
1944 ShouldNotReachHere();
1945 }
1946 }
1947
1948
// No call-site alignment is required on AArch64, so this is a no-op.
void LIR_Assembler::align_call(LIR_Code code) { }
1950
1951
// Emit a direct Java call through a trampoline.  Bails out the
// compilation if there is no space left to emit the trampoline stub.
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  address call = __ trampoline_call(Address(op->addr(), rtype));
  if (call == nullptr) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info());
  // Emit the post-call nop marker.
  __ post_call_nop();
}
1961
1962
// Emit an inline-cache call.  The IC call also goes through a
// trampoline, so it too can fail for lack of stub space.
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  address call = __ ic_call(op->addr());
  if (call == nullptr) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info());
  // Emit the post-call nop marker.
  __ post_call_nop();
}
1972
// Emit the out-of-line stub associated with a static/direct call site.
// Bails out the compilation if there is no stub space left.
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == nullptr) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  // The relocation records which call instruction this stub belongs to.
  __ relocate(static_stub_Relocation::spec(call_pc));
  __ emit_static_call_stub();

  // Reserve room for a trampoline stub on top of the static-call stub.
  assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
        <= call_stub_size(), "stub too big");
  __ end_a_stub();
}
2112
2113
2114 void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
2115 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2116 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2117 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2118 __ mov (rscratch1, c);
2119 __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2120 }
2121
2122
// Store an oop constant into the outgoing-argument area.
// NOTE(review): the leading ShouldNotReachHere() makes the remainder of
// this method dead code on AArch64 — presumably oop parameters are
// materialized some other way here; confirm before relying on it.
void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
  ShouldNotReachHere();
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ lea(rscratch1, __ constant_oop_address(o));
  __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
}
2131
2132
2133 // This code replaces a call to arraycopy; no exception may
2134 // be thrown in this code, they must be thrown in the System.arraycopy
2135 // activation frame; we could save some checks if this would not be the case
2136 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2137 ciArrayKlass* default_type = op->expected_type();
2138 Register src = op->src()->as_register();
2139 Register dst = op->dst()->as_register();
2140 Register src_pos = op->src_pos()->as_register();
2141 Register dst_pos = op->dst_pos()->as_register();
2142 Register length = op->length()->as_register();
2143 Register tmp = op->tmp()->as_register();
2144
2145 CodeStub* stub = op->stub();
2146 int flags = op->flags();
2147 BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
2148 if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2149
2150 // if we don't know anything, just go through the generic arraycopy
2151 if (default_type == nullptr // || basic_type == T_OBJECT
2152 ) {
2153 Label done;
2154 assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2155
2156 // Save the arguments in case the generic arraycopy fails and we
2157 // have to fall back to the JNI stub
2158 __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2159 __ stp(length, src_pos, Address(sp, 2*BytesPerWord));
2160 __ str(src, Address(sp, 4*BytesPerWord));
2161
2162 address copyfunc_addr = StubRoutines::generic_arraycopy();
2163 assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2164
2165 // The arguments are in java calling convention so we shift them
2166 // to C convention
2167 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2168 __ mov(c_rarg0, j_rarg0);
2169 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2183 __ cbz(r0, *stub->continuation());
2184
2185 // Reload values from the stack so they are where the stub
2186 // expects them.
2187 __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2188 __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
2189 __ ldr(src, Address(sp, 4*BytesPerWord));
2190
2191 // r0 is -1^K where K == partial copied count
2192 __ eonw(rscratch1, r0, zr);
2193 // adjust length down and src/end pos up by partial copied count
2194 __ subw(length, length, rscratch1);
2195 __ addw(src_pos, src_pos, rscratch1);
2196 __ addw(dst_pos, dst_pos, rscratch1);
2197 __ b(*stub->entry());
2198
2199 __ bind(*stub->continuation());
2200 return;
2201 }
2202
2203 assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2204
2205 int elem_size = type2aelembytes(basic_type);
2206 int scale = exact_log2(elem_size);
2207
2208 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2209 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2210
2211 // test for null
2212 if (flags & LIR_OpArrayCopy::src_null_check) {
2213 __ cbz(src, *stub->entry());
2214 }
2215 if (flags & LIR_OpArrayCopy::dst_null_check) {
2216 __ cbz(dst, *stub->entry());
2217 }
2218
2219 // If the compiler was not able to prove that exact type of the source or the destination
2220 // of the arraycopy is an array type, check at runtime if the source or the destination is
2221 // an instance type.
2222 if (flags & LIR_OpArrayCopy::type_check) {
2697 __ verify_klass_ptr(tmp);
2698 #endif
2699 } else {
2700 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2701 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2702
2703 __ ldr(tmp, mdo_addr);
2704 __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2705
2706 __ orr(tmp, tmp, TypeEntries::type_unknown);
2707 __ str(tmp, mdo_addr);
2708 // FIXME: Write barrier needed here?
2709 }
2710 }
2711
2712 __ bind(next);
2713 }
2714 COMMENT("} emit_profile_type");
2715 }
2716
2717
// No alignment is required for backward branch targets on AArch64.
void LIR_Assembler::align_backward_branch_target() {
}
2720
2721
2722 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2723 // tmp must be unused
2724 assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2725
2726 if (left->is_single_cpu()) {
2727 assert(dest->is_single_cpu(), "expect single result reg");
2728 __ negw(dest->as_register(), left->as_register());
2729 } else if (left->is_double_cpu()) {
2730 assert(dest->is_double_cpu(), "expect double result reg");
2731 __ neg(dest->as_register_lo(), left->as_register_lo());
2732 } else if (left->is_single_fpu()) {
2733 assert(dest->is_single_fpu(), "expect single float result reg");
2734 __ fnegs(dest->as_float_reg(), left->as_float_reg());
2735 } else {
2736 assert(left->is_double_fpu(), "expect double float operand reg");
2836 void LIR_Assembler::membar_loadload() {
2837 __ membar(Assembler::LoadLoad);
2838 }
2839
2840 void LIR_Assembler::membar_storestore() {
2841 __ membar(MacroAssembler::StoreStore);
2842 }
2843
2844 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
2845
2846 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
2847
// Lower the LIR on-spin-wait op to the platform spin-wait hint sequence.
void LIR_Assembler::on_spin_wait() {
  __ spin_wait();
}
2851
// Copy the current thread pointer (kept in rthread on AArch64) into the
// result register.
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  __ mov(result_reg->as_register(), rthread);
}
2855
2856
2857 void LIR_Assembler::peephole(LIR_List *lir) {
2858 #if 0
2859 if (tableswitch_count >= max_tableswitches)
2860 return;
2861
2862 /*
2863 This finite-state automaton recognizes sequences of compare-and-
2864 branch instructions. We will turn them into a tableswitch. You
2865 could argue that C1 really shouldn't be doing this sort of
2866 optimization, but without it the code is really horrible.
2867 */
2868
2869 enum { start_s, cmp1_s, beq_s, cmp_s } state;
2870 int first_key, last_key = -2147483648;
2871 int next_key = 0;
2872 int start_insn = -1;
2873 int last_insn = -1;
2874 Register reg = noreg;
2875 LIR_Opr reg_opr;
|
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "asm/macroAssembler.inline.hpp"
27 #include "asm/assembler.hpp"
28 #include "c1/c1_CodeStubs.hpp"
29 #include "c1/c1_Compilation.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_MacroAssembler.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "ci/ciArrayKlass.hpp"
35 #include "ci/ciInlineKlass.hpp"
36 #include "ci/ciInstance.hpp"
37 #include "ci/ciObjArrayKlass.hpp"
38 #include "code/compiledIC.hpp"
39 #include "gc/shared/collectedHeap.hpp"
40 #include "gc/shared/gc_globals.hpp"
41 #include "nativeInst_aarch64.hpp"
42 #include "oops/objArrayKlass.hpp"
43 #include "oops/oop.inline.hpp"
44 #include "runtime/frame.inline.hpp"
45 #include "runtime/sharedRuntime.hpp"
46 #include "runtime/stubRoutines.hpp"
47 #include "utilities/powerOfTwo.hpp"
48 #include "vmreg_aarch64.inline.hpp"
49
50
51 #ifndef PRODUCT
52 #define COMMENT(x) do { __ block_comment(x); } while (0)
53 #else
54 #define COMMENT(x)
55 #endif
56
NEEDS_CLEANUP // remove these definitions?
const Register SYNC_header = r0; // synchronization header
const Register SHIFT_count = r0; // where count for shift operations must be
60
61 #define __ _masm->
62
63
412 MonitorExitStub* stub = nullptr;
413 if (method()->is_synchronized()) {
414 monitor_address(0, FrameMap::r0_opr);
415 stub = new MonitorExitStub(FrameMap::r0_opr, 0);
416 __ unlock_object(r5, r4, r0, r6, *stub->entry());
417 __ bind(*stub->continuation());
418 }
419
420 if (compilation()->env()->dtrace_method_probes()) {
421 __ mov(c_rarg0, rthread);
422 __ mov_metadata(c_rarg1, method()->constant_encoding());
423 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
424 }
425
426 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
427 __ mov(r0, r19); // Restore the exception
428 }
429
430 // remove the activation and dispatch to the unwind handler
431 __ block_comment("remove_frame and dispatch to the unwind handler");
432 __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
433 __ far_jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_unwind_exception_id)));
434
435 // Emit the slow path assembly
436 if (stub != nullptr) {
437 stub->emit_code(this);
438 }
439
440 return offset;
441 }
442
443
444 int LIR_Assembler::emit_deopt_handler() {
445 // generate code for exception handler
446 address handler_base = __ start_a_stub(deopt_handler_size());
447 if (handler_base == nullptr) {
448 // not enough space left for the handler
449 bailout("deopt handler overflow");
450 return -1;
451 }
452
464 assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
465 "out of bounds read in post-call NOP check");
466 __ end_a_stub();
467
468 return entry_offset;
469 }
470
// Attach debug information (including the oop map) to a poll branch at
// address 'adr'.  The relocation marks the branch as a safepoint poll;
// the debug info is recorded at the current code offset.
void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
  // Mark 'adr' as a poll-type site in the current code section.
  _masm->code_section()->relocate(adr, relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  // Record scope/oop-map debug info keyed by the current pc offset.
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != nullptr) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}
480
// Emit the method return sequence.  When InlineTypeReturnedAsFields is
// enabled, an inline-type (value object) return in r0 may first be
// scalarized into the j_rarg* registers via its klass's unpack handler.
// Then the frame is torn down, a return safepoint poll is emitted (slow
// path in 'code_stub'), and control returns to the caller.
void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");

  if (InlineTypeReturnedAsFields) {
    // Check if we are returning an non-null inline type and load its fields into registers
    ciType* return_type = compilation()->method()->return_type();
    if (return_type->is_inlinetype()) {
      // Statically known inline-type return: call the klass's unpack handler.
      ciInlineKlass* vk = return_type->as_inline_klass();
      if (vk->can_be_returned_as_fields()) {
        address unpack_handler = vk->unpack_handler();
        assert(unpack_handler != nullptr, "must be");
        __ far_call(RuntimeAddress(unpack_handler));
      }
    } else if (return_type->is_instance_klass() && (!return_type->is_loaded() || StressCallingConvention)) {
      // Return type only known at runtime: dispatch on the object's klass.
      Label skip;
      Label not_null;
      __ cbnz(r0, not_null);
      // Returned value is null, zero all return registers because they may belong to oop fields
      __ mov(j_rarg1, zr);
      __ mov(j_rarg2, zr);
      __ mov(j_rarg3, zr);
      __ mov(j_rarg4, zr);
      __ mov(j_rarg5, zr);
      __ mov(j_rarg6, zr);
      __ mov(j_rarg7, zr);
      __ b(skip);
      __ bind(not_null);

      // Check if we are returning an non-null inline type and load its fields into registers
      __ test_oop_is_not_inline_type(r0, rscratch2, skip, /* can_be_null= */ false);

      // Load fields from a buffered value with an inline class specific handler
      __ load_klass(rscratch1 /*dst*/, r0 /*src*/);
      __ ldr(rscratch1, Address(rscratch1, InlineKlass::adr_members_offset()));
      __ ldr(rscratch1, Address(rscratch1, InlineKlass::unpack_handler_offset()));
      // Unpack handler can be null if inline type is not scalarizable in returns
      __ cbz(rscratch1, skip);
      __ blr(rscratch1);

      __ bind(skip);
    }
    // At this point, r0 points to the value object (for interpreter or C1 caller).
    // The fields of the object are copied into registers (for C2 caller).
  }

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // Record where the poll instruction lands so the stub can locate it.
  code_stub->set_safepoint_offset(__ offset());
  __ relocate(relocInfo::poll_return_type);
  __ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
  __ ret(lr);
}
538
539 int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
540 return (__ store_inline_type_fields_to_buf(vk, false));
541 }
542
// Emit a safepoint poll (a read of the polling page) and return the code
// offset just past the poll instruction.  'tmp' is unused on AArch64;
// rscratch1 holds the polling-page address.
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  guarantee(info != nullptr, "Shouldn't be null");
  __ get_polling_page(rscratch1, relocInfo::poll_type);
  add_debug_info_for_branch(info); // This isn't just debug info:
                                   // it's the oop map
  __ read_polling_page(rscratch1, relocInfo::poll_type);
  return __ offset();
}
551
552
553 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
554 if (from_reg == r31_sp)
555 from_reg = sp;
556 if (to_reg == r31_sp)
557 to_reg = sp;
558 __ mov(to_reg, from_reg);
559 }
560
// Not used on AArch64; aborts via Unimplemented() if ever reached.
void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
562
569 switch (c->type()) {
570 case T_INT: {
571 assert(patch_code == lir_patch_none, "no patching handled here");
572 __ movw(dest->as_register(), c->as_jint());
573 break;
574 }
575
576 case T_ADDRESS: {
577 assert(patch_code == lir_patch_none, "no patching handled here");
578 __ mov(dest->as_register(), c->as_jint());
579 break;
580 }
581
582 case T_LONG: {
583 assert(patch_code == lir_patch_none, "no patching handled here");
584 __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
585 break;
586 }
587
588 case T_OBJECT: {
589 if (patch_code != lir_patch_none) {
590 jobject2reg_with_patching(dest->as_register(), info);
591 } else {
592 jobject2reg(c->as_jobject(), dest->as_register());
593 }
594 break;
595 }
596
597 case T_METADATA: {
598 if (patch_code != lir_patch_none) {
599 klass2reg_with_patching(dest->as_register(), info);
600 } else {
601 __ mov_metadata(dest->as_register(), c->as_metadata());
602 }
603 break;
604 }
605
606 case T_FLOAT: {
607 if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
608 __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
609 } else {
610 __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
611 __ ldrs(dest->as_float_reg(), Address(rscratch1));
612 }
682 LIR_Const* c = src->as_constant_ptr();
683 LIR_Address* to_addr = dest->as_address_ptr();
684
685 void (Assembler::* insn)(Register Rt, const Address &adr);
686
687 switch (type) {
688 case T_ADDRESS:
689 assert(c->as_jint() == 0, "should be");
690 insn = &Assembler::str;
691 break;
692 case T_LONG:
693 assert(c->as_jlong() == 0, "should be");
694 insn = &Assembler::str;
695 break;
696 case T_INT:
697 assert(c->as_jint() == 0, "should be");
698 insn = &Assembler::strw;
699 break;
700 case T_OBJECT:
701 case T_ARRAY:
702 // Non-null case is not handled on aarch64 but handled on x86
703 // FIXME: do we need to add it here?
704 assert(c->as_jobject() == nullptr, "should be");
705 if (UseCompressedOops && !wide) {
706 insn = &Assembler::strw;
707 } else {
708 insn = &Assembler::str;
709 }
710 break;
711 case T_CHAR:
712 case T_SHORT:
713 assert(c->as_jint() == 0, "should be");
714 insn = &Assembler::strh;
715 break;
716 case T_BOOLEAN:
717 case T_BYTE:
718 assert(c->as_jint() == 0, "should be");
719 insn = &Assembler::strb;
720 break;
721 default:
722 ShouldNotReachHere();
723 insn = &Assembler::str; // unreachable
1031 case T_CHAR:
1032 __ ldrh(dest->as_register(), as_Address(from_addr));
1033 break;
1034 case T_SHORT:
1035 __ ldrsh(dest->as_register(), as_Address(from_addr));
1036 break;
1037
1038 default:
1039 ShouldNotReachHere();
1040 }
1041
1042 if (is_reference_type(type)) {
1043 if (UseCompressedOops && !wide) {
1044 __ decode_heap_oop(dest->as_register());
1045 }
1046
1047 __ verify_oop(dest->as_register());
1048 }
1049 }
1050
1051 void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
1052 assert(dst->is_cpu_register(), "must be");
1053 assert(dst->type() == src->type(), "must be");
1054
1055 if (src->is_cpu_register()) {
1056 reg2reg(src, dst);
1057 } else if (src->is_stack()) {
1058 stack2reg(src, dst, dst->type());
1059 } else if (src->is_constant()) {
1060 const2reg(src, dst, lir_patch_none, nullptr);
1061 } else {
1062 ShouldNotReachHere();
1063 }
1064 }
1065
1066 int LIR_Assembler::array_element_size(BasicType type) const {
1067 int elem_size = type2aelembytes(type);
1068 return exact_log2(elem_size);
1069 }
1070
1071
1072 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1073 switch (op->code()) {
1074 case lir_idiv:
1075 case lir_irem:
1076 arithmetic_idiv(op->code(),
1077 op->in_opr1(),
1078 op->in_opr2(),
1079 op->in_opr3(),
1080 op->result_opr(),
1081 op->info());
1082 break;
1083 case lir_fmad:
1084 __ fmaddd(op->result_opr()->as_double_reg(),
1236 __ lea(rscratch1, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
1237 __ ldarb(rscratch1, rscratch1);
1238 __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1239 add_debug_info_for_null_check_here(op->stub()->info());
1240 __ br(Assembler::NE, *op->stub()->entry());
1241 }
1242 __ allocate_object(op->obj()->as_register(),
1243 op->tmp1()->as_register(),
1244 op->tmp2()->as_register(),
1245 op->header_size(),
1246 op->object_size(),
1247 op->klass()->as_register(),
1248 *op->stub()->entry());
1249 __ bind(*op->stub()->continuation());
1250 }
1251
1252 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1253 Register len = op->len()->as_register();
1254 __ uxtw(len, len);
1255
1256 if (UseSlowPath || op->always_slow_path() ||
1257 (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1258 (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1259 __ b(*op->stub()->entry());
1260 } else {
1261 Register tmp1 = op->tmp1()->as_register();
1262 Register tmp2 = op->tmp2()->as_register();
1263 Register tmp3 = op->tmp3()->as_register();
1264 if (len == tmp1) {
1265 tmp1 = tmp3;
1266 } else if (len == tmp2) {
1267 tmp2 = tmp3;
1268 } else if (len == tmp3) {
1269 // everything is ok
1270 } else {
1271 __ mov(tmp3, len);
1272 }
1273 __ allocate_array(op->obj()->as_register(),
1274 len,
1275 tmp1,
1276 tmp2,
1316 assert(data != nullptr, "need data for type check");
1317 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1318 }
1319 Label* success_target = success;
1320 Label* failure_target = failure;
1321
1322 if (obj == k_RInfo) {
1323 k_RInfo = dst;
1324 } else if (obj == klass_RInfo) {
1325 klass_RInfo = dst;
1326 }
1327 if (k->is_loaded() && !UseCompressedClassPointers) {
1328 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1329 } else {
1330 Rtmp1 = op->tmp3()->as_register();
1331 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1332 }
1333
1334 assert_different_registers(obj, k_RInfo, klass_RInfo);
1335
1336 if (op->need_null_check()) {
1337 if (should_profile) {
1338 Register mdo = klass_RInfo;
1339 __ mov_metadata(mdo, md->constant_encoding());
1340 Label not_null;
1341 __ cbnz(obj, not_null);
1342 // Object is null; update MDO and exit
1343 Address data_addr
1344 = __ form_address(rscratch2, mdo,
1345 md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1346 0);
1347 __ ldrb(rscratch1, data_addr);
1348 __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1349 __ strb(rscratch1, data_addr);
1350 __ b(*obj_is_null);
1351 __ bind(not_null);
1352
1353 Register recv = k_RInfo;
1354 __ load_klass(recv, obj);
1355 type_profile_helper(mdo, md, data, recv);
1356 } else {
1357 __ cbz(obj, *obj_is_null);
1358 }
1359 }
1360
1361 if (!k->is_loaded()) {
1362 klass2reg_with_patching(k_RInfo, op->info_for_patch());
1363 } else {
1364 __ mov_metadata(k_RInfo, k->constant_encoding());
1365 }
1366 __ verify_oop(obj);
1367
1368 if (op->fast_check()) {
1369 assert(!k->is_loaded() || !k->is_obj_array_klass(), "Use refined array for a direct pointer comparison");
1370 // get object class
1371 // not a safepoint as obj null check happens earlier
1372 __ load_klass(rscratch1, obj);
1373 __ cmp( rscratch1, k_RInfo);
1374
1375 __ br(Assembler::NE, *failure_target);
1376 // successful cast, fall through to profile or jump
1377 } else {
1378 // get object class
1379 // not a safepoint as obj null check happens earlier
1380 __ load_klass(klass_RInfo, obj);
1381 if (k->is_loaded()) {
1382 // See if we get an immediate positive hit
1383 __ ldr(rscratch1, Address(klass_RInfo, int64_t(k->super_check_offset())));
1384 __ cmp(k_RInfo, rscratch1);
1385 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1386 __ br(Assembler::NE, *failure_target);
1387 // successful cast, fall through to profile or jump
1388 } else {
1389 // See if we get an immediate positive hit
1390 __ br(Assembler::EQ, *success_target);
1391 // check for self
1392 if (k->is_loaded() && k->is_obj_array_klass()) {
1393 // For a direct pointer comparison, we need the refined array klass pointer
1394 ciKlass* k_refined = ciObjArrayKlass::make(k->as_obj_array_klass()->element_klass());
1395 __ mov_metadata(rscratch1, k_refined->constant_encoding());
1396 __ cmp(klass_RInfo, rscratch1);
1397 } else {
1398 __ cmp(klass_RInfo, k_RInfo);
1399 }
1400 __ br(Assembler::EQ, *success_target);
1401
1402 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1403 __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1404 __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1405 // result is a boolean
1406 __ cbzw(klass_RInfo, *failure_target);
1407 // successful cast, fall through to profile or jump
1408 }
1409 } else {
1410 // perform the fast part of the checking logic
1411 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
1412 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1413 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1414 __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1415 __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1416 // result is a boolean
1417 __ cbz(k_RInfo, *failure_target);
1418 // successful cast, fall through to profile or jump
1419 }
1500 __ bind(success);
1501 if (dst != obj) {
1502 __ mov(dst, obj);
1503 }
1504 } else if (code == lir_instanceof) {
1505 Register obj = op->object()->as_register();
1506 Register dst = op->result_opr()->as_register();
1507 Label success, failure, done;
1508 emit_typecheck_helper(op, &success, &failure, &failure);
1509 __ bind(failure);
1510 __ mov(dst, zr);
1511 __ b(done);
1512 __ bind(success);
1513 __ mov(dst, 1);
1514 __ bind(done);
1515 } else {
1516 ShouldNotReachHere();
1517 }
1518 }
1519
void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
  // We are loading/storing from/to an array that *may* be a flat array (the
  // declared type is Object[], abstract[], interface[] or VT.ref[]).
  // If this array is a flat array, take the slow path.
  __ test_flat_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
  // A value operand is only present for stores; loads pass an illegal operand
  // and skip the null-free check below.
  if (!op->value()->is_illegal()) {
    // The array is not a flat array, but it might be null-free. If we are storing
    // a null into a null-free array, take the slow path (which will throw NPE).
    Label skip;
    __ cbnz(op->value()->as_register(), skip);
    __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
    __ bind(skip);
  }
}
1534
void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
  // We are storing into an array that *may* be null-free (the declared type is
  // Object[], abstract[], interface[] or VT.ref[]).
  Label test_mark_word;
  Register tmp = op->tmp()->as_register();
  // Read the mark word; if the object is unlocked, the array-property bits can
  // be tested directly in the mark word.
  __ ldr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
  __ tst(tmp, markWord::unlocked_value);
  __ br(Assembler::NE, test_mark_word);
  // Otherwise (mark word displaced by locking) fall back to the klass'
  // prototype header, which carries the same property bits.
  __ load_prototype_header(tmp, op->array()->as_register());
  __ bind(test_mark_word);
  // Leave the result in the condition flags (NE iff null-free); the consumer
  // of this LIR op branches on them — no branch is emitted here.
  __ tst(tmp, markWord::null_free_array_bit_in_place);
}
1547
// Substitutability check for value objects (acmp on possible inline types):
// two references are substitutable iff they are the same oop, or both are
// inline-type instances of the same klass whose fields are pairwise
// substitutable (the field-wise comparison is delegated to the slow-path
// stub). The result operand is written from op->equal_result() /
// op->not_equal_result() via move().
void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
  Label L_oops_equal;
  Label L_oops_not_equal;
  Label L_end;

  Register left = op->left()->as_register();
  Register right = op->right()->as_register();

  // Identical references are trivially substitutable.
  __ cmp(left, right);
  __ br(Assembler::EQ, L_oops_equal);

  // (1) Null check -- if one of the operands is null, the other must not be null (because
  // the two references are not equal), so they are not substitutable,
  // FIXME: do null check only if the operand is nullable
  {
    __ cbz(left, L_oops_not_equal);
    __ cbz(right, L_oops_not_equal);
  }

  ciKlass* left_klass = op->left_klass();
  ciKlass* right_klass = op->right_klass();

  // (2) Inline type check -- if either of the operands is not an inline type,
  // they are not substitutable. We do this only if we are not sure that the
  // operands are inline type
  if ((left_klass == nullptr || right_klass == nullptr) ||// The klass is still unloaded, or came from a Phi node.
      !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
    Register tmp1 = op->tmp1()->as_register();
    // AND the two mark words together: the inline-type pattern survives the
    // AND only if both marks carry it.
    __ mov(tmp1, markWord::inline_type_pattern);
    __ ldr(rscratch1, Address(left, oopDesc::mark_offset_in_bytes()));
    __ andr(tmp1, tmp1, rscratch1);
    __ ldr(rscratch1, Address(right, oopDesc::mark_offset_in_bytes()));
    __ andr(tmp1, tmp1, rscratch1);
    __ cmp(tmp1, (u1)markWord::inline_type_pattern);
    __ br(Assembler::NE, L_oops_not_equal);
  }

  // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
  if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) {
    // No need to load klass -- the operands are statically known to be the same inline klass.
    __ b(*op->stub()->entry());
  } else {
    Register left_klass_op = op->left_klass_op()->as_register();
    Register right_klass_op = op->right_klass_op()->as_register();

    if (UseCompressedClassPointers) {
      // Narrow klass words can be compared directly without decoding.
      __ ldrw(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes()));
      __ ldrw(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
      __ cmpw(left_klass_op, right_klass_op);
    } else {
      __ ldr(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes()));
      __ ldr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
      __ cmp(left_klass_op, right_klass_op);
    }

    __ br(Assembler::EQ, *op->stub()->entry()); // same klass -> do slow check
    // fall through to L_oops_not_equal
  }

  __ bind(L_oops_not_equal);
  move(op->not_equal_result(), op->result_opr());
  __ b(L_end);

  __ bind(L_oops_equal);
  move(op->equal_result(), op->result_opr());
  __ b(L_end);

  // We've returned from the stub. R0 contains 0x0 IFF the two
  // operands are not substitutable. (Don't compare against 0x1 in case the
  // C compiler is naughty)
  __ bind(*op->stub()->continuation());
  __ cbz(r0, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
  move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
  // fall-through
  __ bind(L_end);
}
1624
1625
// 32-bit compare-and-swap at [addr]: swaps in newval if the current value
// equals cmpval. Afterwards rscratch1 = 1 on failure (NE), 0 on success.
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  // Trailing full barrier on top of the acquire/release CAS.
  __ membar(__ AnyAny);
}
1631
// 64-bit compare-and-swap at [addr]: swaps in newval if the current value
// equals cmpval. Afterwards rscratch1 = 1 on failure (NE), 0 on success.
void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  // Trailing full barrier on top of the acquire/release CAS.
  __ membar(__ AnyAny);
}
1637
1638
1639 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1640 Register addr;
1641 if (op->addr()->is_register()) {
1642 addr = as_reg(op->addr());
1643 } else {
1644 assert(op->addr()->is_address(), "what else?");
1645 LIR_Address* addr_ptr = op->addr()->as_address_ptr();
2119 __ cmp(left->as_register_lo(), right->as_register_lo());
2120 __ mov(dst->as_register(), (uint64_t)-1L);
2121 __ br(Assembler::LT, done);
2122 __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
2123 __ bind(done);
2124 } else {
2125 ShouldNotReachHere();
2126 }
2127 }
2128
2129
2130 void LIR_Assembler::align_call(LIR_Code code) { }
2131
2132
2133 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2134 address call = __ trampoline_call(Address(op->addr(), rtype));
2135 if (call == nullptr) {
2136 bailout("trampoline stub overflow");
2137 return;
2138 }
2139 add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
2140 __ post_call_nop();
2141 }
2142
2143
2144 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2145 address call = __ ic_call(op->addr());
2146 if (call == nullptr) {
2147 bailout("trampoline stub overflow");
2148 return;
2149 }
2150 add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
2151 __ post_call_nop();
2152 }
2153
// Emit the out-of-line stub that a static/direct call site is later patched
// through. The stub is associated (via the relocation) with the pc of the
// call it belongs to.
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == nullptr) {
    // No room left in the stub section — abandon this compilation.
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  __ relocate(static_stub_Relocation::spec(call_pc));
  __ emit_static_call_stub();

  // Reserve headroom for the trampoline the stub may need to reach far code.
  assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
         <= call_stub_size(), "stub too big");
  __ end_a_stub();
}
2293
2294
2295 void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
2296 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2297 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2298 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2299 __ mov (rscratch1, c);
2300 __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2301 }
2302
2303
// NOTE(review): guarded by ShouldNotReachHere() — this overload is not
// expected to be used on aarch64, and everything after the guard is dead
// code kept for shape parity with other platforms.
void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
  ShouldNotReachHere();
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ lea(rscratch1, __ constant_oop_address(o));
  __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
}
2312
// Branch to the slow path if `obj` is a flat array, or — when it is the
// arraycopy destination — also if it is a null-free array. Optionally
// null-checks `obj` first (also branching to the slow path on null).
// Clobbers `tmp`.
void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) {
  if (null_check) {
    __ cbz(obj, *slow_path->entry());
  }
  if (is_dest) {
    __ test_null_free_array_oop(obj, tmp, *slow_path->entry());
    // TODO 8350865 Flat no longer implies null-free, so we need to check for flat dest. Can we do better here?
    __ test_flat_array_oop(obj, tmp, *slow_path->entry());
  } else {
    __ test_flat_array_oop(obj, tmp, *slow_path->entry());
  }
}
2325
2326 // This code replaces a call to arraycopy; no exception may
2327 // be thrown in this code, they must be thrown in the System.arraycopy
2328 // activation frame; we could save some checks if this would not be the case
2329 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2330 ciArrayKlass* default_type = op->expected_type();
2331 Register src = op->src()->as_register();
2332 Register dst = op->dst()->as_register();
2333 Register src_pos = op->src_pos()->as_register();
2334 Register dst_pos = op->dst_pos()->as_register();
2335 Register length = op->length()->as_register();
2336 Register tmp = op->tmp()->as_register();
2337
2338 CodeStub* stub = op->stub();
2339 int flags = op->flags();
2340 BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
2341 if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2342
2343 if (flags & LIR_OpArrayCopy::always_slow_path) {
2344 __ b(*stub->entry());
2345 __ bind(*stub->continuation());
2346 return;
2347 }
2348
2349 // if we don't know anything, just go through the generic arraycopy
2350 if (default_type == nullptr // || basic_type == T_OBJECT
2351 ) {
2352 Label done;
2353 assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2354
2355 // Save the arguments in case the generic arraycopy fails and we
2356 // have to fall back to the JNI stub
2357 __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2358 __ stp(length, src_pos, Address(sp, 2*BytesPerWord));
2359 __ str(src, Address(sp, 4*BytesPerWord));
2360
2361 address copyfunc_addr = StubRoutines::generic_arraycopy();
2362 assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2363
2364 // The arguments are in java calling convention so we shift them
2365 // to C convention
2366 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2367 __ mov(c_rarg0, j_rarg0);
2368 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2382 __ cbz(r0, *stub->continuation());
2383
2384 // Reload values from the stack so they are where the stub
2385 // expects them.
2386 __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2387 __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
2388 __ ldr(src, Address(sp, 4*BytesPerWord));
2389
2390 // r0 is -1^K where K == partial copied count
2391 __ eonw(rscratch1, r0, zr);
2392 // adjust length down and src/end pos up by partial copied count
2393 __ subw(length, length, rscratch1);
2394 __ addw(src_pos, src_pos, rscratch1);
2395 __ addw(dst_pos, dst_pos, rscratch1);
2396 __ b(*stub->entry());
2397
2398 __ bind(*stub->continuation());
2399 return;
2400 }
2401
2402 // Handle inline type arrays
2403 if (flags & LIR_OpArrayCopy::src_inlinetype_check) {
2404 arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
2405 }
2406 if (flags & LIR_OpArrayCopy::dst_inlinetype_check) {
2407 arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
2408 }
2409
2410 assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2411
2412 int elem_size = type2aelembytes(basic_type);
2413 int scale = exact_log2(elem_size);
2414
2415 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2416 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2417
2418 // test for null
2419 if (flags & LIR_OpArrayCopy::src_null_check) {
2420 __ cbz(src, *stub->entry());
2421 }
2422 if (flags & LIR_OpArrayCopy::dst_null_check) {
2423 __ cbz(dst, *stub->entry());
2424 }
2425
2426 // If the compiler was not able to prove that exact type of the source or the destination
2427 // of the arraycopy is an array type, check at runtime if the source or the destination is
2428 // an instance type.
2429 if (flags & LIR_OpArrayCopy::type_check) {
2904 __ verify_klass_ptr(tmp);
2905 #endif
2906 } else {
2907 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2908 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2909
2910 __ ldr(tmp, mdo_addr);
2911 __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2912
2913 __ orr(tmp, tmp, TypeEntries::type_unknown);
2914 __ str(tmp, mdo_addr);
2915 // FIXME: Write barrier needed here?
2916 }
2917 }
2918
2919 __ bind(next);
2920 }
2921 COMMENT("} emit_profile_type");
2922 }
2923
// Profile whether `obj` is an inline-type instance: if it is, OR `flag`
// into the profile byte at the MDO address; otherwise leave the MDO
// untouched. `not_null` means the compiler proved obj != null, so the
// runtime null check can be skipped.
void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  bool not_null = op->not_null();
  int flag = op->flag();

  Label not_inline_type;
  if (!not_null) {
    // Null receiver: nothing to record.
    __ cbz(obj, not_inline_type);
  }

  __ test_oop_is_not_inline_type(obj, tmp, not_inline_type);

  // Read-modify-write of the profile byte (ldrb/orr/strb, not atomic).
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr(), rscratch2);
  __ ldrb(rscratch1, mdo_addr);
  __ orr(rscratch1, rscratch1, flag);
  __ strb(rscratch1, mdo_addr);

  __ bind(not_inline_type);
}
2944
// Intentionally empty: backward branch targets get no extra alignment on aarch64.
void LIR_Assembler::align_backward_branch_target() {
}
2947
2948
2949 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2950 // tmp must be unused
2951 assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2952
2953 if (left->is_single_cpu()) {
2954 assert(dest->is_single_cpu(), "expect single result reg");
2955 __ negw(dest->as_register(), left->as_register());
2956 } else if (left->is_double_cpu()) {
2957 assert(dest->is_double_cpu(), "expect double result reg");
2958 __ neg(dest->as_register_lo(), left->as_register_lo());
2959 } else if (left->is_single_fpu()) {
2960 assert(dest->is_single_fpu(), "expect single float result reg");
2961 __ fnegs(dest->as_float_reg(), left->as_float_reg());
2962 } else {
2963 assert(left->is_double_fpu(), "expect double float operand reg");
// Load-to-load memory ordering barrier.
void LIR_Assembler::membar_loadload() {
  __ membar(Assembler::LoadLoad);
}
3066
// Store-to-store memory ordering barrier.
void LIR_Assembler::membar_storestore() {
  __ membar(MacroAssembler::StoreStore);
}
3070
3071 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
3072
3073 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
3074
// Emit the platform spin-wait hint sequence.
void LIR_Assembler::on_spin_wait() {
  __ spin_wait();
}
3078
// Materialize the current thread pointer: it lives permanently in rthread
// on aarch64, so this is a plain register move.
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  __ mov(result_reg->as_register(), rthread);
}
3082
// Compare the frame's saved original-pc slot against NULL_WORD, leaving the
// result in the condition flags for the caller to branch on. Clobbers rscratch2.
void LIR_Assembler::check_orig_pc() {
  __ ldr(rscratch2, frame_map()->address_for_orig_pc_addr());
  __ cmp(rscratch2, (u1)NULL_WORD);
}
3087
3088 void LIR_Assembler::peephole(LIR_List *lir) {
3089 #if 0
3090 if (tableswitch_count >= max_tableswitches)
3091 return;
3092
3093 /*
3094 This finite-state automaton recognizes sequences of compare-and-
3095 branch instructions. We will turn them into a tableswitch. You
3096 could argue that C1 really shouldn't be doing this sort of
3097 optimization, but without it the code is really horrible.
3098 */
3099
3100 enum { start_s, cmp1_s, beq_s, cmp_s } state;
3101 int first_key, last_key = -2147483648;
3102 int next_key = 0;
3103 int start_insn = -1;
3104 int last_insn = -1;
3105 Register reg = noreg;
3106 LIR_Opr reg_opr;
|