16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "asm/assembler.hpp"
29 #include "c1/c1_CodeStubs.hpp"
30 #include "c1/c1_Compilation.hpp"
31 #include "c1/c1_LIRAssembler.hpp"
32 #include "c1/c1_MacroAssembler.hpp"
33 #include "c1/c1_Runtime1.hpp"
34 #include "c1/c1_ValueStack.hpp"
35 #include "ci/ciArrayKlass.hpp"
36 #include "ci/ciInstance.hpp"
37 #include "code/compiledIC.hpp"
38 #include "gc/shared/collectedHeap.hpp"
39 #include "gc/shared/gc_globals.hpp"
40 #include "nativeInst_aarch64.hpp"
41 #include "oops/objArrayKlass.hpp"
42 #include "runtime/frame.inline.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "runtime/stubRoutines.hpp"
45 #include "utilities/powerOfTwo.hpp"
46 #include "vmreg_aarch64.inline.hpp"
47
48
49 #ifndef PRODUCT
50 #define COMMENT(x) do { __ block_comment(x); } while (0)
51 #else
52 #define COMMENT(x)
53 #endif
54
55 NEEDS_CLEANUP // remove these definitions?
56 const Register IC_Klass = rscratch2; // where the IC klass is cached
57 const Register SYNC_header = r0; // synchronization header
58 const Register SHIFT_count = r0; // where count for shift operations must be
59
60 #define __ _masm->
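// HotSpot idiom: every pseudo-assembly statement below is written `__ insn(...)`,
// which the macro above expands to `_masm->insn(...)`, keeping the emitter
// readable as a linear assembly listing.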
61
434 if (UseHeavyMonitors) {
435 __ b(*stub->entry());
436 } else {
437 __ unlock_object(r5, r4, r0, *stub->entry());
438 }
439 __ bind(*stub->continuation());
440 }
441
442 if (compilation()->env()->dtrace_method_probes()) {
443 __ mov(c_rarg0, rthread);
444 __ mov_metadata(c_rarg1, method()->constant_encoding());
445 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
446 }
447
448 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
449 __ mov(r0, r19); // Restore the exception
450 }
451
452 // remove the activation and dispatch to the unwind handler
453 __ block_comment("remove_frame and dispatch to the unwind handler");
454 __ remove_frame(initial_frame_size_in_bytes());
455 __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
456
457 // Emit the slow path assembly
458 if (stub != NULL) {
459 stub->emit_code(this);
460 }
461
462 return offset;
463 }
464
465
466 int LIR_Assembler::emit_deopt_handler() {
467 // generate code for deopt handler
468 address handler_base = __ start_a_stub(deopt_handler_size());
469 if (handler_base == NULL) {
470 // not enough space left for the handler
471 bailout("deopt handler overflow");
472 return -1;
473 }
474
478 __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
479 guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
480 __ end_a_stub();
481
482 return offset;
483 }
484
485 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
486 _masm->code_section()->relocate(adr, relocInfo::poll_type);
487 int pc_offset = code_offset();
488 flush_debug_info(pc_offset);
489 info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
490 if (info->exception_handlers() != NULL) {
491 compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
492 }
493 }
494
495 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
496 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0");
497
498 // Pop the stack before the safepoint code
499 __ remove_frame(initial_frame_size_in_bytes());
500
501 if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
502 __ reserved_stack_check();
503 }
504
505 code_stub->set_safepoint_offset(__ offset());
506 __ relocate(relocInfo::poll_return_type);
507 __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
508 __ ret(lr);
509 }
510
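// Safepoint polls are emitted as a load from the thread's polling page. When a
// safepoint (or handshake) is requested, the VM arms the page so the load traps;
// the poll_type relocation lets the signal handler map the faulting PC back to
// this nmethod and its oop map.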
511 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
512 guarantee(info != NULL, "Shouldn't be NULL");
513 __ get_polling_page(rscratch1, relocInfo::poll_type);
514 add_debug_info_for_branch(info); // This isn't just debug info:
515 // it's the oop map
516 __ read_polling_page(rscratch1, relocInfo::poll_type);
517 return __ offset();
518 }
519
520
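// Note: register encoding 31 means sp in some contexts and zr in others; the
// LIR layer can hand us the explicit r31_sp alias, which must be normalized to
// sp before emitting a plain register move.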
521 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
522 if (from_reg == r31_sp)
523 from_reg = sp;
524 if (to_reg == r31_sp)
525 to_reg = sp;
526 __ mov(to_reg, from_reg);
527 }
528
529 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
530
536
537 switch (c->type()) {
538 case T_INT: {
539 assert(patch_code == lir_patch_none, "no patching handled here");
540 __ movw(dest->as_register(), c->as_jint());
541 break;
542 }
543
544 case T_ADDRESS: {
545 assert(patch_code == lir_patch_none, "no patching handled here");
546 __ mov(dest->as_register(), c->as_jint());
547 break;
548 }
549
550 case T_LONG: {
551 assert(patch_code == lir_patch_none, "no patching handled here");
552 __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
553 break;
554 }
555
556 case T_OBJECT: {
557 if (patch_code == lir_patch_none) {
558 jobject2reg(c->as_jobject(), dest->as_register());
559 } else {
560 jobject2reg_with_patching(dest->as_register(), info);
561 }
562 break;
563 }
564
565 case T_METADATA: {
566 if (patch_code != lir_patch_none) {
567 klass2reg_with_patching(dest->as_register(), info);
568 } else {
569 __ mov_metadata(dest->as_register(), c->as_metadata());
570 }
571 break;
572 }
573
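// AArch64 fmov (immediate) can only encode values of the form
// +/-(1 + m/16) * 2^e with m in [0,15] and e in [-3,4] (an 8-bit immediate);
// operand_valid_for_float_immediate checks this. Anything else is loaded
// PC-relative from a constant slot emitted by float_constant()/double_constant().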
574 case T_FLOAT: {
575 if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
576 __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
577 } else {
578 __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
579 __ ldrs(dest->as_float_reg(), Address(rscratch1));
580 }
581 break;
582 }
583
584 case T_DOUBLE: {
585 if (__ operand_valid_for_float_immediate(c->as_jdouble())) {
586 __ fmovd(dest->as_double_reg(), (c->as_jdouble()));
587 } else {
588 __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
589 __ ldrd(dest->as_double_reg(), Address(rscratch1));
590 }
591 break;
592 }
593
594 default:
595 ShouldNotReachHere();
596 }
597 }
598
599 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
600 LIR_Const* c = src->as_constant_ptr();
601 switch (c->type()) {
602 case T_OBJECT:
603 {
604 if (! c->as_jobject())
605 __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
606 else {
607 const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
608 reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
609 }
610 }
611 break;
612 case T_ADDRESS:
613 {
614 const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
615 reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
616 } break;
617 case T_INT:
618 case T_FLOAT:
619 {
620 Register reg = zr;
621 if (c->as_jint_bits() == 0)
648 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
649 assert(src->is_constant(), "should not call otherwise");
650 LIR_Const* c = src->as_constant_ptr();
651 LIR_Address* to_addr = dest->as_address_ptr();
652
653 void (Assembler::* insn)(Register Rt, const Address &adr);
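// insn is a pointer-to-member-function: each case below just selects the store
// of the right width, and the single indirect call at the end emits it. All
// constants reaching this path are zero/null (see the asserts), so the stored
// source register is always zr.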
654
655 switch (type) {
656 case T_ADDRESS:
657 assert(c->as_jint() == 0, "should be");
658 insn = &Assembler::str;
659 break;
660 case T_LONG:
661 assert(c->as_jlong() == 0, "should be");
662 insn = &Assembler::str;
663 break;
664 case T_INT:
665 assert(c->as_jint() == 0, "should be");
666 insn = &Assembler::strw;
667 break;
668 case T_OBJECT:
669 case T_ARRAY:
670 assert(c->as_jobject() == 0, "should be");
671 if (UseCompressedOops && !wide) {
672 insn = &Assembler::strw;
673 } else {
674 insn = &Assembler::str;
675 }
676 break;
677 case T_CHAR:
678 case T_SHORT:
679 assert(c->as_jint() == 0, "should be");
680 insn = &Assembler::strh;
681 break;
682 case T_BOOLEAN:
683 case T_BYTE:
684 assert(c->as_jint() == 0, "should be");
685 insn = &Assembler::strb;
686 break;
687 default:
688 ShouldNotReachHere();
689 insn = &Assembler::str; // unreachable
690 }
691
692 if (info) add_debug_info_for_null_check_here(info);
693 (_masm->*insn)(zr, as_Address(to_addr, rscratch1));
694 }
695
696 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
697 assert(src->is_register(), "should not call otherwise");
698 assert(dest->is_register(), "should not call otherwise");
699
700 // move between cpu-registers
701 if (dest->is_single_cpu()) {
702 if (src->type() == T_LONG) {
703 // Can do LONG -> OBJECT
704 move_regs(src->as_register_lo(), dest->as_register());
705 return;
706 }
707 assert(src->is_single_cpu(), "must match");
708 if (src->type() == T_OBJECT) {
709 __ verify_oop(src->as_register());
710 }
711 move_regs(src->as_register(), dest->as_register());
712
713 } else if (dest->is_double_cpu()) {
714 if (is_reference_type(src->type())) {
715 // Surprising, but we can see a move of a long to T_OBJECT here
716 __ verify_oop(src->as_register());
717 move_regs(src->as_register(), dest->as_register_lo());
718 return;
719 }
720 assert(src->is_double_cpu(), "must match");
721 Register f_lo = src->as_register_lo();
722 Register f_hi = src->as_register_hi();
723 Register t_lo = dest->as_register_lo();
724 Register t_hi = dest->as_register_hi();
725 assert(f_hi == f_lo, "must be same");
726 assert(t_hi == t_lo, "must be same");
727 move_regs(f_lo, t_lo);
728
788
789 if (UseCompressedOops && !wide) {
790 __ encode_heap_oop(compressed_src, src->as_register());
791 } else {
792 compressed_src = src->as_register();
793 }
794 }
795
796 int null_check_here = code_offset();
797 switch (type) {
798 case T_FLOAT: {
799 __ strs(src->as_float_reg(), as_Address(to_addr));
800 break;
801 }
802
803 case T_DOUBLE: {
804 __ strd(src->as_double_reg(), as_Address(to_addr));
805 break;
806 }
807
808 case T_ARRAY: // fall through
809 case T_OBJECT: // fall through
810 if (UseCompressedOops && !wide) {
811 __ strw(compressed_src, as_Address(to_addr, rscratch2));
812 } else {
813 __ str(compressed_src, as_Address(to_addr));
814 }
815 break;
816 case T_METADATA:
817 // We get here to store a method pointer to the stack to pass to
818 // a dtrace runtime call. This can't work on 64 bit with
819 // compressed klass ptrs: T_METADATA can be a compressed klass
820 // ptr or a 64 bit method pointer.
821 ShouldNotReachHere();
822 __ str(src->as_register(), as_Address(to_addr));
823 break;
824 case T_ADDRESS:
825 __ str(src->as_register(), as_Address(to_addr));
826 break;
827 case T_INT:
917 add_call_info_here(info);
918 }
919
920 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
921
922 LIR_Opr temp;
923 if (type == T_LONG || type == T_DOUBLE)
924 temp = FrameMap::rscratch1_long_opr;
925 else
926 temp = FrameMap::rscratch1_opr;
927
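// AArch64 has no memory-to-memory move, so the value is staged through
// rscratch1 (sized as a 32- or 64-bit temp to match the slot width).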
928 stack2reg(src, temp, src->type());
929 reg2stack(temp, dest, dest->type(), false);
930 }
931
932
933 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
934 LIR_Address* addr = src->as_address_ptr();
935 LIR_Address* from_addr = src->as_address_ptr();
936
937 if (addr->base()->type() == T_OBJECT) {
938 __ verify_oop(addr->base()->as_pointer_register());
939 }
940
941 if (patch_code != lir_patch_none) {
942 deoptimize_trap(info);
943 return;
944 }
945
946 if (info != NULL) {
947 add_debug_info_for_null_check_here(info);
948 }
949 int null_check_here = code_offset();
950 switch (type) {
951 case T_FLOAT: {
952 __ ldrs(dest->as_float_reg(), as_Address(from_addr));
953 break;
954 }
955
956 case T_DOUBLE: {
957 __ ldrd(dest->as_double_reg(), as_Address(from_addr));
958 break;
959 }
960
961 case T_ARRAY: // fall through
962 case T_OBJECT: // fall through
963 if (UseCompressedOops && !wide) {
964 __ ldrw(dest->as_register(), as_Address(from_addr));
965 } else {
966 __ ldr(dest->as_register(), as_Address(from_addr));
967 }
968 break;
969 case T_METADATA:
970 // We get here to load a method pointer from the stack to pass to
971 // a dtrace runtime call. This can't work on 64 bit with
972 // compressed klass ptrs: T_METADATA can be a compressed klass
973 // ptr or a 64 bit method pointer.
974 ShouldNotReachHere();
975 __ ldr(dest->as_register(), as_Address(from_addr));
976 break;
977 case T_ADDRESS:
978 __ ldr(dest->as_register(), as_Address(from_addr));
979 break;
980 case T_INT:
1000 case T_SHORT:
1001 __ ldrsh(dest->as_register(), as_Address(from_addr));
1002 break;
1003
1004 default:
1005 ShouldNotReachHere();
1006 }
1007
1008 if (is_reference_type(type)) {
1009 if (UseCompressedOops && !wide) {
1010 __ decode_heap_oop(dest->as_register());
1011 }
1012
1013 if (!UseZGC) {
1014 // Load barrier has not yet been applied, so ZGC can't verify the oop here
1015 __ verify_oop(dest->as_register());
1016 }
1017 }
1018 }
1019
1020
1021 int LIR_Assembler::array_element_size(BasicType type) const {
1022 int elem_size = type2aelembytes(type);
1023 return exact_log2(elem_size);
1024 }
1025
1026
1027 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1028 switch (op->code()) {
1029 case lir_idiv:
1030 case lir_irem:
1031 arithmetic_idiv(op->code(),
1032 op->in_opr1(),
1033 op->in_opr2(),
1034 op->in_opr3(),
1035 op->result_opr(),
1036 op->info());
1037 break;
1038 case lir_fmad:
1039 __ fmaddd(op->result_opr()->as_double_reg(),
1191 __ ldrb(rscratch1, Address(op->klass()->as_register(),
1192 InstanceKlass::init_state_offset()));
1193 __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1194 add_debug_info_for_null_check_here(op->stub()->info());
1195 __ br(Assembler::NE, *op->stub()->entry());
1196 }
1197 __ allocate_object(op->obj()->as_register(),
1198 op->tmp1()->as_register(),
1199 op->tmp2()->as_register(),
1200 op->header_size(),
1201 op->object_size(),
1202 op->klass()->as_register(),
1203 *op->stub()->entry());
1204 __ bind(*op->stub()->continuation());
1205 }
1206
1207 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1208 Register len = op->len()->as_register();
1209 __ uxtw(len, len);
1210
1211 if (UseSlowPath ||
1212 (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1213 (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1214 __ b(*op->stub()->entry());
1215 } else {
1216 Register tmp1 = op->tmp1()->as_register();
1217 Register tmp2 = op->tmp2()->as_register();
1218 Register tmp3 = op->tmp3()->as_register();
1219 if (len == tmp1) {
1220 tmp1 = tmp3;
1221 } else if (len == tmp2) {
1222 tmp2 = tmp3;
1223 } else if (len == tmp3) {
1224 // everything is ok
1225 } else {
1226 __ mov(tmp3, len);
1227 }
1228 __ allocate_array(op->obj()->as_register(),
1229 len,
1230 tmp1,
1231 tmp2,
1297 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1298 }
1299 Label profile_cast_success, profile_cast_failure;
1300 Label *success_target = should_profile ? &profile_cast_success : success;
1301 Label *failure_target = should_profile ? &profile_cast_failure : failure;
1302
1303 if (obj == k_RInfo) {
1304 k_RInfo = dst;
1305 } else if (obj == klass_RInfo) {
1306 klass_RInfo = dst;
1307 }
1308 if (k->is_loaded() && !UseCompressedClassPointers) {
1309 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1310 } else {
1311 Rtmp1 = op->tmp3()->as_register();
1312 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1313 }
1314
1315 assert_different_registers(obj, k_RInfo, klass_RInfo);
1316
1317 if (should_profile) {
1318 Label not_null;
1319 __ cbnz(obj, not_null);
1320 // Object is null; update MDO and exit
1321 Register mdo = klass_RInfo;
1322 __ mov_metadata(mdo, md->constant_encoding());
1323 Address data_addr
1324 = __ form_address(rscratch2, mdo,
1325 md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1326 0);
1327 __ ldrb(rscratch1, data_addr);
1328 __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1329 __ strb(rscratch1, data_addr);
1330 __ b(*obj_is_null);
1331 __ bind(not_null);
1332 } else {
1333 __ cbz(obj, *obj_is_null);
1334 }
1335
1336 if (!k->is_loaded()) {
1337 klass2reg_with_patching(k_RInfo, op->info_for_patch());
1338 } else {
1339 __ mov_metadata(k_RInfo, k->constant_encoding());
1340 }
1341 __ verify_oop(obj);
1342
1343 if (op->fast_check()) {
1344 // get object class
1345 // not a safepoint as obj null check happens earlier
1346 __ load_klass(rscratch1, obj);
1347 __ cmp( rscratch1, k_RInfo);
1348
1349 __ br(Assembler::NE, *failure_target);
1350 // successful cast, fall through to profile or jump
1351 } else {
1352 // get object class
1353 // not a safepoint as obj null check happens earlier
1354 __ load_klass(klass_RInfo, obj);
1503 __ bind(success);
1504 if (dst != obj) {
1505 __ mov(dst, obj);
1506 }
1507 } else if (code == lir_instanceof) {
1508 Register obj = op->object()->as_register();
1509 Register dst = op->result_opr()->as_register();
1510 Label success, failure, done;
1511 emit_typecheck_helper(op, &success, &failure, &failure);
1512 __ bind(failure);
1513 __ mov(dst, zr);
1514 __ b(done);
1515 __ bind(success);
1516 __ mov(dst, 1);
1517 __ bind(done);
1518 } else {
1519 ShouldNotReachHere();
1520 }
1521 }
1522
1523 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1524 __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1525 __ cset(rscratch1, Assembler::NE);
1526 __ membar(__ AnyAny);
1527 }
1528
1529 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1530 __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1531 __ cset(rscratch1, Assembler::NE);
1532 __ membar(__ AnyAny);
1533 }
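// In both CAS helpers above, cmpxchg leaves the flags with EQ on success, so
// cset materializes the failure flag into rscratch1 (NE -> 1, success -> 0).
// The trailing AnyAny barrier upgrades the operation to a full fence, which
// compareAndSet requires even on the failure path.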
1534
1535
1536 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1537 assert(VM_Version::supports_cx8(), "wrong machine");
1538 Register addr;
1539 if (op->addr()->is_register()) {
1540 addr = as_reg(op->addr());
1541 } else {
1542 assert(op->addr()->is_address(), "what else?");
1944 }
1945
1946 if (opr2->is_constant()) {
1947 bool is_32bit = false; // width of register operand
1948 jlong imm;
1949
1950 switch(opr2->type()) {
1951 case T_INT:
1952 imm = opr2->as_constant_ptr()->as_jint();
1953 is_32bit = true;
1954 break;
1955 case T_LONG:
1956 imm = opr2->as_constant_ptr()->as_jlong();
1957 break;
1958 case T_ADDRESS:
1959 imm = opr2->as_constant_ptr()->as_jint();
1960 break;
1961 case T_METADATA:
1962 imm = (intptr_t)(opr2->as_constant_ptr()->as_metadata());
1963 break;
1964 case T_OBJECT:
1965 case T_ARRAY:
1966 jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
1967 __ cmpoop(reg1, rscratch1);
1968 return;
1969 default:
1970 ShouldNotReachHere();
1971 imm = 0; // unreachable
1972 break;
1973 }
1974
1975 if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
1976 if (is_32bit)
1977 __ cmpw(reg1, imm);
1978 else
1979 __ subs(zr, reg1, imm);
1980 return;
1981 } else {
1982 __ mov(rscratch1, imm);
1983 if (is_32bit)
2018 __ cmp(left->as_register_lo(), right->as_register_lo());
2019 __ mov(dst->as_register(), (uint64_t)-1L);
2020 __ br(Assembler::LT, done);
2021 __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
2022 __ bind(done);
2023 } else {
2024 ShouldNotReachHere();
2025 }
2026 }
2027
2028
2029 void LIR_Assembler::align_call(LIR_Code code) { }
2030
2031
2032 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2033 address call = __ trampoline_call(Address(op->addr(), rtype));
2034 if (call == NULL) {
2035 bailout("trampoline stub overflow");
2036 return;
2037 }
2038 add_call_info(code_offset(), op->info());
2039 __ post_call_nop();
2040 }
2041
2042
2043 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2044 address call = __ ic_call(op->addr());
2045 if (call == NULL) {
2046 bailout("trampoline stub overflow");
2047 return;
2048 }
2049 add_call_info(code_offset(), op->info());
2050 __ post_call_nop();
2051 }
2052
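// The static call stub emitted below is the patchable target of a relocated
// static/opt-virtual call; to_trampoline_stub_size() is added in the size
// check because resolving the call may later require a far-branch trampoline
// within the same budget.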
2053 void LIR_Assembler::emit_static_call_stub() {
2054 address call_pc = __ pc();
2055 address stub = __ start_a_stub(call_stub_size());
2056 if (stub == NULL) {
2057 bailout("static call stub overflow");
2058 return;
2059 }
2060
2061 int start = __ offset();
2062
2063 __ relocate(static_stub_Relocation::spec(call_pc));
2064 __ emit_static_call_stub();
2065
2066 assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size()
2067 <= call_stub_size(), "stub too big");
2068 __ end_a_stub();
2069 }
2112 __ b(_unwind_handler_entry);
2113 }
2114
2115
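// For register shift counts, the lslv/asrv/lsrv family used below considers
// only the low 5 (32-bit forms) or 6 (64-bit forms) bits of the count
// register, which matches Java's shift-count masking exactly, so no explicit
// 'and' is needed.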
2116 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2117 Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2118 Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2119
2120 switch (left->type()) {
2121 case T_INT: {
2122 switch (code) {
2123 case lir_shl: __ lslvw (dreg, lreg, count->as_register()); break;
2124 case lir_shr: __ asrvw (dreg, lreg, count->as_register()); break;
2125 case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
2126 default:
2127 ShouldNotReachHere();
2128 break;
2129 }
2130 break;
2131 case T_LONG:
2132 case T_ADDRESS:
2133 case T_OBJECT:
2134 switch (code) {
2135 case lir_shl: __ lslv (dreg, lreg, count->as_register()); break;
2136 case lir_shr: __ asrv (dreg, lreg, count->as_register()); break;
2137 case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
2138 default:
2139 ShouldNotReachHere();
2140 break;
2141 }
2142 break;
2143 default:
2144 ShouldNotReachHere();
2145 break;
2146 }
2147 }
2148 }
2149
2150
2151 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2152 Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2153 Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2154
2155 switch (left->type()) {
2156 case T_INT: {
2157 switch (code) {
2158 case lir_shl: __ lslw (dreg, lreg, count); break;
2159 case lir_shr: __ asrw (dreg, lreg, count); break;
2160 case lir_ushr: __ lsrw (dreg, lreg, count); break;
2161 default:
2162 ShouldNotReachHere();
2163 break;
2164 }
2165 break;
2166 case T_LONG:
2167 case T_ADDRESS:
2168 case T_OBJECT:
2169 switch (code) {
2170 case lir_shl: __ lsl (dreg, lreg, count); break;
2171 case lir_shr: __ asr (dreg, lreg, count); break;
2172 case lir_ushr: __ lsr (dreg, lreg, count); break;
2173 default:
2174 ShouldNotReachHere();
2175 break;
2176 }
2177 break;
2178 default:
2179 ShouldNotReachHere();
2180 break;
2181 }
2182 }
2183 }
2184
2185
2186 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2187 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2192
2193
2194 void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
2195 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2196 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2197 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2198 __ mov (rscratch1, c);
2199 __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2200 }
2201
2202
2203 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
2204 ShouldNotReachHere();
2205 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2206 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2207 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2208 __ lea(rscratch1, __ constant_oop_address(o));
2209 __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2210 }
2211
2212
2213 // This code replaces a call to arraycopy; no exception may
2214 // be thrown in this code. Exceptions must be raised in the System.arraycopy
2215 // activation frame instead; we could save some checks if that were not the case.
2216 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2217 ciArrayKlass* default_type = op->expected_type();
2218 Register src = op->src()->as_register();
2219 Register dst = op->dst()->as_register();
2220 Register src_pos = op->src_pos()->as_register();
2221 Register dst_pos = op->dst_pos()->as_register();
2222 Register length = op->length()->as_register();
2223 Register tmp = op->tmp()->as_register();
2224
2225 CodeStub* stub = op->stub();
2226 int flags = op->flags();
2227 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2228 if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2229
2230 // if we don't know anything, just go through the generic arraycopy
2231 if (default_type == NULL // || basic_type == T_OBJECT
2232 ) {
2233 Label done;
2234 assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2235
2236 // Save the arguments in case the generic arraycopy fails and we
2237 // have to fall back to the JNI stub
2238 __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2239 __ stp(length, src_pos, Address(sp, 2*BytesPerWord));
2240 __ str(src, Address(sp, 4*BytesPerWord));
2241
2242 address copyfunc_addr = StubRoutines::generic_arraycopy();
2243 assert(copyfunc_addr != NULL, "generic arraycopy stub required");
2244
2245 // The arguments are in java calling convention so we shift them
2246 // to C convention
2247 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2248 __ mov(c_rarg0, j_rarg0);
2249 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2263 __ cbz(r0, *stub->continuation());
2264
2265 // Reload values from the stack so they are where the stub
2266 // expects them.
2267 __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2268 __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
2269 __ ldr(src, Address(sp, 4*BytesPerWord));
2270
2271 // r0 is -1^K where K == partial copied count
2272 __ eonw(rscratch1, r0, zr); // eon with zr is bitwise NOT: rscratch1 = ~r0 == K
2273 // adjust length down and src/dst pos up by the partial copied count
2274 __ subw(length, length, rscratch1);
2275 __ addw(src_pos, src_pos, rscratch1);
2276 __ addw(dst_pos, dst_pos, rscratch1);
2277 __ b(*stub->entry());
2278
2279 __ bind(*stub->continuation());
2280 return;
2281 }
2282
2283 assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2284
2285 int elem_size = type2aelembytes(basic_type);
2286 int scale = exact_log2(elem_size);
2287
2288 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2289 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2290 Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
2291 Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
2292
2293 // test for NULL
2294 if (flags & LIR_OpArrayCopy::src_null_check) {
2295 __ cbz(src, *stub->entry());
2296 }
2297 if (flags & LIR_OpArrayCopy::dst_null_check) {
2298 __ cbz(dst, *stub->entry());
2299 }
2300
2301 // If the compiler was not able to prove that exact type of the source or the destination
2302 // of the arraycopy is an array type, check at runtime if the source or the destination is
2837 // first time here. Set profile type.
2838 __ str(tmp, mdo_addr);
2839 } else {
2840 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
2841 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2842
2843 __ ldr(tmp, mdo_addr);
2844 __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2845
2846 __ orr(tmp, tmp, TypeEntries::type_unknown);
2847 __ str(tmp, mdo_addr);
2848 // FIXME: Write barrier needed here?
2849 }
2850 }
2851
2852 __ bind(next);
2853 }
2854 COMMENT("} emit_profile_type");
2855 }
2856
2857
2858 void LIR_Assembler::align_backward_branch_target() {
2859 }
2860
2861
2862 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2863 // tmp must be unused
2864 assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2865
2866 if (left->is_single_cpu()) {
2867 assert(dest->is_single_cpu(), "expect single result reg");
2868 __ negw(dest->as_register(), left->as_register());
2869 } else if (left->is_double_cpu()) {
2870 assert(dest->is_double_cpu(), "expect double result reg");
2871 __ neg(dest->as_register_lo(), left->as_register_lo());
2872 } else if (left->is_single_fpu()) {
2873 assert(dest->is_single_fpu(), "expect single float result reg");
2874 __ fnegs(dest->as_float_reg(), left->as_float_reg());
2875 } else {
2876 assert(left->is_double_fpu(), "expect double float operand reg");
2977 void LIR_Assembler::membar_loadload() {
2978 __ membar(Assembler::LoadLoad);
2979 }
2980
2981 void LIR_Assembler::membar_storestore() {
2982 __ membar(MacroAssembler::StoreStore);
2983 }
2984
2985 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
2986
2987 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
2988
2989 void LIR_Assembler::on_spin_wait() {
2990 __ spin_wait();
2991 }
2992
2993 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
2994 __ mov(result_reg->as_register(), rthread);
2995 }
2996
2997
2998 void LIR_Assembler::peephole(LIR_List *lir) {
2999 #if 0
3000 if (tableswitch_count >= max_tableswitches)
3001 return;
3002
3003 /*
3004 This finite-state automaton recognizes sequences of compare-and-
3005 branch instructions. We will turn them into a tableswitch. You
3006 could argue that C1 really shouldn't be doing this sort of
3007 optimization, but without it the code is really horrible.
3008 */
3009
3010 enum { start_s, cmp1_s, beq_s, cmp_s } state;
3011 int first_key, last_key = -2147483648;
3012 int next_key = 0;
3013 int start_insn = -1;
3014 int last_insn = -1;
3015 Register reg = noreg;
3016 LIR_Opr reg_opr;
3124 #endif
3125 }
3126
3127 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
3128 Address addr = as_Address(src->as_address_ptr());
3129 BasicType type = src->type();
3130 bool is_oop = is_reference_type(type);
3131
3132 void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
3133 void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);
3134
3135 switch(type) {
3136 case T_INT:
3137 xchg = &MacroAssembler::atomic_xchgalw;
3138 add = &MacroAssembler::atomic_addalw;
3139 break;
3140 case T_LONG:
3141 xchg = &MacroAssembler::atomic_xchgal;
3142 add = &MacroAssembler::atomic_addal;
3143 break;
3144 case T_OBJECT:
3145 case T_ARRAY:
3146 if (UseCompressedOops) {
3147 xchg = &MacroAssembler::atomic_xchgalw;
3148 add = &MacroAssembler::atomic_addalw;
3149 } else {
3150 xchg = &MacroAssembler::atomic_xchgal;
3151 add = &MacroAssembler::atomic_addal;
3152 }
3153 break;
3154 default:
3155 ShouldNotReachHere();
3156 xchg = &MacroAssembler::atomic_xchgal;
3157 add = &MacroAssembler::atomic_addal; // unreachable
3158 }
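// Naming: the atomic_*al helpers are the acquire+release (sequentially
// consistent) variants; the trailing 'w' selects the 32-bit forms, used for
// T_INT and for compressed oops.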
3159
3160 switch (code) {
3161 case lir_xadd:
3162 {
3163 RegisterOrConstant inc;
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "asm/assembler.hpp"
29 #include "c1/c1_CodeStubs.hpp"
30 #include "c1/c1_Compilation.hpp"
31 #include "c1/c1_LIRAssembler.hpp"
32 #include "c1/c1_MacroAssembler.hpp"
33 #include "c1/c1_Runtime1.hpp"
34 #include "c1/c1_ValueStack.hpp"
35 #include "ci/ciArrayKlass.hpp"
36 #include "ci/ciInlineKlass.hpp"
37 #include "ci/ciInstance.hpp"
38 #include "code/compiledIC.hpp"
39 #include "gc/shared/collectedHeap.hpp"
40 #include "gc/shared/gc_globals.hpp"
41 #include "nativeInst_aarch64.hpp"
42 #include "oops/objArrayKlass.hpp"
43 #include "oops/oop.inline.hpp"
44 #include "runtime/frame.inline.hpp"
45 #include "runtime/sharedRuntime.hpp"
46 #include "runtime/stubRoutines.hpp"
47 #include "utilities/powerOfTwo.hpp"
48 #include "vmreg_aarch64.inline.hpp"
49
50
51 #ifndef PRODUCT
52 #define COMMENT(x) do { __ block_comment(x); } while (0)
53 #else
54 #define COMMENT(x)
55 #endif
56
57 NEEDS_CLEANUP // remove these definitions?
58 const Register IC_Klass = rscratch2; // where the IC klass is cached
59 const Register SYNC_header = r0; // synchronization header
60 const Register SHIFT_count = r0; // where count for shift operations must be
61
62 #define __ _masm->
63
436 if (UseHeavyMonitors) {
437 __ b(*stub->entry());
438 } else {
439 __ unlock_object(r5, r4, r0, *stub->entry());
440 }
441 __ bind(*stub->continuation());
442 }
443
444 if (compilation()->env()->dtrace_method_probes()) {
445 __ mov(c_rarg0, rthread);
446 __ mov_metadata(c_rarg1, method()->constant_encoding());
447 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
448 }
449
450 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
451 __ mov(r0, r19); // Restore the exception
452 }
453
454 // remove the activation and dispatch to the unwind handler
455 __ block_comment("remove_frame and dispatch to the unwind handler");
456 __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
457 __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
458
459 // Emit the slow path assembly
460 if (stub != NULL) {
461 stub->emit_code(this);
462 }
463
464 return offset;
465 }
466
467
468 int LIR_Assembler::emit_deopt_handler() {
469 // generate code for deopt handler
470 address handler_base = __ start_a_stub(deopt_handler_size());
471 if (handler_base == NULL) {
472 // not enough space left for the handler
473 bailout("deopt handler overflow");
474 return -1;
475 }
476
480 __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
481 guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
482 __ end_a_stub();
483
484 return offset;
485 }
486
487 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
488 _masm->code_section()->relocate(adr, relocInfo::poll_type);
489 int pc_offset = code_offset();
490 flush_debug_info(pc_offset);
491 info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
492 if (info->exception_handlers() != NULL) {
493 compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
494 }
495 }
496
497 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
498 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0");
499
500 ciMethod* method = compilation()->method();
501 if (InlineTypeReturnedAsFields && method->return_type()->is_inlinetype()) {
502 ciInlineKlass* vk = method->return_type()->as_inline_klass();
503 if (vk->can_be_returned_as_fields()) {
504 address unpack_handler = vk->unpack_handler();
505 assert(unpack_handler != NULL, "must be");
506 __ far_call(RuntimeAddress(unpack_handler));
507 // At this point, r0 points to the value object (for interpreter or C1 caller).
508 // The fields of the object are copied into registers (for C2 caller).
509 }
510 }
511
512 // Pop the stack before the safepoint code
513 __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
514
515 if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
516 __ reserved_stack_check();
517 }
518
519 code_stub->set_safepoint_offset(__ offset());
520 __ relocate(relocInfo::poll_return_type);
521 __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
522 __ ret(lr);
523 }
524
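// Valhalla: repacks a scalarized inline-type return (fields in registers) into
// a buffered heap object when the caller expects a reference. (The boolean
// argument's exact contract lives in the MacroAssembler helper; false is
// assumed here to select the non-C2 path.)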
525 int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
526 return (__ store_inline_type_fields_to_buf(vk, false));
527 }
528
529 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
530 guarantee(info != NULL, "Shouldn't be NULL");
531 __ get_polling_page(rscratch1, relocInfo::poll_type);
532 add_debug_info_for_branch(info); // This isn't just debug info:
533 // it's the oop map
534 __ read_polling_page(rscratch1, relocInfo::poll_type);
535 return __ offset();
536 }
537
538
539 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
540 if (from_reg == r31_sp)
541 from_reg = sp;
542 if (to_reg == r31_sp)
543 to_reg = sp;
544 __ mov(to_reg, from_reg);
545 }
546
547 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
548
554
555 switch (c->type()) {
556 case T_INT: {
557 assert(patch_code == lir_patch_none, "no patching handled here");
558 __ movw(dest->as_register(), c->as_jint());
559 break;
560 }
561
562 case T_ADDRESS: {
563 assert(patch_code == lir_patch_none, "no patching handled here");
564 __ mov(dest->as_register(), c->as_jint());
565 break;
566 }
567
568 case T_LONG: {
569 assert(patch_code == lir_patch_none, "no patching handled here");
570 __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
571 break;
572 }
573
574 case T_PRIMITIVE_OBJECT:
575 case T_OBJECT: {
576 if (patch_code != lir_patch_none) {
577 jobject2reg_with_patching(dest->as_register(), info);
578 } else {
579 jobject2reg(c->as_jobject(), dest->as_register());
580 }
581 break;
582 }
583
584 case T_METADATA: {
585 if (patch_code != lir_patch_none) {
586 klass2reg_with_patching(dest->as_register(), info);
587 } else {
588 __ mov_metadata(dest->as_register(), c->as_metadata());
589 }
590 break;
591 }
592
593 case T_FLOAT: {
594 if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
595 __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
596 } else {
597 __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
598 __ ldrs(dest->as_float_reg(), Address(rscratch1));
599 }
600 break;
601 }
602
603 case T_DOUBLE: {
604 if (__ operand_valid_for_float_immediate(c->as_jdouble())) {
605 __ fmovd(dest->as_double_reg(), (c->as_jdouble()));
606 } else {
607 __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
608 __ ldrd(dest->as_double_reg(), Address(rscratch1));
609 }
610 break;
611 }
612
613 default:
614 ShouldNotReachHere();
615 }
616 }
617
618 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
619 LIR_Const* c = src->as_constant_ptr();
620 switch (c->type()) {
621 case T_PRIMITIVE_OBJECT:
622 case T_OBJECT:
623 {
624 if (! c->as_jobject())
625 __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
626 else {
627 const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
628 reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
629 }
630 }
631 break;
632 case T_ADDRESS:
633 {
634 const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
635 reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
636 } break;
637 case T_INT:
638 case T_FLOAT:
639 {
640 Register reg = zr;
641 if (c->as_jint_bits() == 0)
668 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
669 assert(src->is_constant(), "should not call otherwise");
670 LIR_Const* c = src->as_constant_ptr();
671 LIR_Address* to_addr = dest->as_address_ptr();
672
673 void (Assembler::* insn)(Register Rt, const Address &adr);
674
675 switch (type) {
676 case T_ADDRESS:
677 assert(c->as_jint() == 0, "should be");
678 insn = &Assembler::str;
679 break;
680 case T_LONG:
681 assert(c->as_jlong() == 0, "should be");
682 insn = &Assembler::str;
683 break;
684 case T_INT:
685 assert(c->as_jint() == 0, "should be");
686 insn = &Assembler::strw;
687 break;
688 case T_PRIMITIVE_OBJECT:
689 case T_OBJECT:
690 case T_ARRAY:
691 // Non-null case is not handled on aarch64 but handled on x86
692 // FIXME: do we need to add it here?
693 assert(c->as_jobject() == 0, "should be");
694 if (UseCompressedOops && !wide) {
695 insn = &Assembler::strw;
696 } else {
697 insn = &Assembler::str;
698 }
699 break;
700 case T_CHAR:
701 case T_SHORT:
702 assert(c->as_jint() == 0, "should be");
703 insn = &Assembler::strh;
704 break;
705 case T_BOOLEAN:
706 case T_BYTE:
707 assert(c->as_jint() == 0, "should be");
708 insn = &Assembler::strb;
709 break;
710 default:
711 ShouldNotReachHere();
712 insn = &Assembler::str; // unreachable
713 }
714
715 if (info) add_debug_info_for_null_check_here(info);
716 (_masm->*insn)(zr, as_Address(to_addr, rscratch1));
717 }
718
719 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
720 assert(src->is_register(), "should not call otherwise");
721 assert(dest->is_register(), "should not call otherwise");
722
723 // move between cpu-registers
724 if (dest->is_single_cpu()) {
725 if (src->type() == T_LONG) {
726 // Can do LONG -> OBJECT
727 move_regs(src->as_register_lo(), dest->as_register());
728 return;
729 }
730 assert(src->is_single_cpu(), "must match");
731 if (src->type() == T_OBJECT || src->type() == T_PRIMITIVE_OBJECT) {
732 __ verify_oop(src->as_register());
733 }
734 move_regs(src->as_register(), dest->as_register());
735
736 } else if (dest->is_double_cpu()) {
737 if (is_reference_type(src->type())) {
738 // Surprising, but we can see a move of a long to T_OBJECT here
739 __ verify_oop(src->as_register());
740 move_regs(src->as_register(), dest->as_register_lo());
741 return;
742 }
743 assert(src->is_double_cpu(), "must match");
744 Register f_lo = src->as_register_lo();
745 Register f_hi = src->as_register_hi();
746 Register t_lo = dest->as_register_lo();
747 Register t_hi = dest->as_register_hi();
748 assert(f_hi == f_lo, "must be same");
749 assert(t_hi == t_lo, "must be same");
750 move_regs(f_lo, t_lo);
751
811
812 if (UseCompressedOops && !wide) {
813 __ encode_heap_oop(compressed_src, src->as_register());
814 } else {
815 compressed_src = src->as_register();
816 }
817 }
818
819 int null_check_here = code_offset();
820 switch (type) {
821 case T_FLOAT: {
822 __ strs(src->as_float_reg(), as_Address(to_addr));
823 break;
824 }
825
826 case T_DOUBLE: {
827 __ strd(src->as_double_reg(), as_Address(to_addr));
828 break;
829 }
830
831 case T_PRIMITIVE_OBJECT: // fall through
832 case T_ARRAY: // fall through
833 case T_OBJECT: // fall through
834 if (UseCompressedOops && !wide) {
835 __ strw(compressed_src, as_Address(to_addr, rscratch2));
836 } else {
837 __ str(compressed_src, as_Address(to_addr));
838 }
839 break;
840 case T_METADATA:
841 // We get here to store a method pointer to the stack to pass to
842 // a dtrace runtime call. This can't work on 64 bit with
843 // compressed klass ptrs: T_METADATA can be a compressed klass
844 // ptr or a 64 bit method pointer.
845 ShouldNotReachHere();
846 __ str(src->as_register(), as_Address(to_addr));
847 break;
848 case T_ADDRESS:
849 __ str(src->as_register(), as_Address(to_addr));
850 break;
851 case T_INT:
941 add_call_info_here(info);
942 }
943
944 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
945
946 LIR_Opr temp;
947 if (type == T_LONG || type == T_DOUBLE)
948 temp = FrameMap::rscratch1_long_opr;
949 else
950 temp = FrameMap::rscratch1_opr;
951
952 stack2reg(src, temp, src->type());
953 reg2stack(temp, dest, dest->type(), false);
954 }
955
956
957 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
958 LIR_Address* addr = src->as_address_ptr();
959 LIR_Address* from_addr = src->as_address_ptr();
960
961 if (addr->base()->type() == T_OBJECT || addr->base()->type() == T_PRIMITIVE_OBJECT) {
962 __ verify_oop(addr->base()->as_pointer_register());
963 }
964
965 if (patch_code != lir_patch_none) {
966 deoptimize_trap(info);
967 return;
968 }
969
970 if (info != NULL) {
971 add_debug_info_for_null_check_here(info);
972 }
973 int null_check_here = code_offset();
974 switch (type) {
975 case T_FLOAT: {
976 __ ldrs(dest->as_float_reg(), as_Address(from_addr));
977 break;
978 }
979
980 case T_DOUBLE: {
981 __ ldrd(dest->as_double_reg(), as_Address(from_addr));
982 break;
983 }
984
985 case T_PRIMITIVE_OBJECT: // fall through
986 case T_ARRAY: // fall through
987 case T_OBJECT: // fall through
988 if (UseCompressedOops && !wide) {
989 __ ldrw(dest->as_register(), as_Address(from_addr));
990 } else {
991 __ ldr(dest->as_register(), as_Address(from_addr));
992 }
993 break;
994 case T_METADATA:
996 // We get here to load a method pointer from the stack to pass to
997 // a dtrace runtime call. This can't work on 64 bit with
997 // compressed klass ptrs: T_METADATA can be a compressed klass
998 // ptr or a 64 bit method pointer.
999 ShouldNotReachHere();
1000 __ ldr(dest->as_register(), as_Address(from_addr));
1001 break;
1002 case T_ADDRESS:
1003 __ ldr(dest->as_register(), as_Address(from_addr));
1004 break;
1005 case T_INT:
1025 case T_SHORT:
1026 __ ldrsh(dest->as_register(), as_Address(from_addr));
1027 break;
1028
1029 default:
1030 ShouldNotReachHere();
1031 }
1032
1033 if (is_reference_type(type)) {
1034 if (UseCompressedOops && !wide) {
1035 __ decode_heap_oop(dest->as_register());
1036 }
1037
1038 if (!UseZGC) {
1039 // Load barrier has not yet been applied, so ZGC can't verify the oop here
1040 __ verify_oop(dest->as_register());
1041 }
1042 }
1043 }
1044
1045 void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
1046 assert(dst->is_cpu_register(), "must be");
1047 assert(dst->type() == src->type(), "must be");
1048
1049 if (src->is_cpu_register()) {
1050 reg2reg(src, dst);
1051 } else if (src->is_stack()) {
1052 stack2reg(src, dst, dst->type());
1053 } else if (src->is_constant()) {
1054 const2reg(src, dst, lir_patch_none, NULL);
1055 } else {
1056 ShouldNotReachHere();
1057 }
1058 }
1059
1060 int LIR_Assembler::array_element_size(BasicType type) const {
1061 int elem_size = type2aelembytes(type);
1062 return exact_log2(elem_size);
1063 }
1064
1065
1066 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1067 switch (op->code()) {
1068 case lir_idiv:
1069 case lir_irem:
1070 arithmetic_idiv(op->code(),
1071 op->in_opr1(),
1072 op->in_opr2(),
1073 op->in_opr3(),
1074 op->result_opr(),
1075 op->info());
1076 break;
1077 case lir_fmad:
1078 __ fmaddd(op->result_opr()->as_double_reg(),
1230 __ ldrb(rscratch1, Address(op->klass()->as_register(),
1231 InstanceKlass::init_state_offset()));
1232 __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1233 add_debug_info_for_null_check_here(op->stub()->info());
1234 __ br(Assembler::NE, *op->stub()->entry());
1235 }
1236 __ allocate_object(op->obj()->as_register(),
1237 op->tmp1()->as_register(),
1238 op->tmp2()->as_register(),
1239 op->header_size(),
1240 op->object_size(),
1241 op->klass()->as_register(),
1242 *op->stub()->entry());
1243 __ bind(*op->stub()->continuation());
1244 }
1245
1246 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1247 Register len = op->len()->as_register();
1248 __ uxtw(len, len);
1249
1250 if (UseSlowPath || op->type() == T_PRIMITIVE_OBJECT ||
1251 (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1252 (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1253 __ b(*op->stub()->entry());
1254 } else {
1255 Register tmp1 = op->tmp1()->as_register();
1256 Register tmp2 = op->tmp2()->as_register();
1257 Register tmp3 = op->tmp3()->as_register();
1258 if (len == tmp1) {
1259 tmp1 = tmp3;
1260 } else if (len == tmp2) {
1261 tmp2 = tmp3;
1262 } else if (len == tmp3) {
1263 // everything is ok
1264 } else {
1265 __ mov(tmp3, len);
1266 }
1267 __ allocate_array(op->obj()->as_register(),
1268 len,
1269 tmp1,
1270 tmp2,
1336 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1337 }
1338 Label profile_cast_success, profile_cast_failure;
1339 Label *success_target = should_profile ? &profile_cast_success : success;
1340 Label *failure_target = should_profile ? &profile_cast_failure : failure;
1341
1342 if (obj == k_RInfo) {
1343 k_RInfo = dst;
1344 } else if (obj == klass_RInfo) {
1345 klass_RInfo = dst;
1346 }
1347 if (k->is_loaded() && !UseCompressedClassPointers) {
1348 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1349 } else {
1350 Rtmp1 = op->tmp3()->as_register();
1351 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1352 }
1353
1354 assert_different_registers(obj, k_RInfo, klass_RInfo);
1355
1356 if (op->need_null_check()) {
1357 if (should_profile) {
1358 Label not_null;
1359 __ cbnz(obj, not_null);
1360 // Object is null; update MDO and exit
1361 Register mdo = klass_RInfo;
1362 __ mov_metadata(mdo, md->constant_encoding());
1363 Address data_addr
1364 = __ form_address(rscratch2, mdo,
1365 md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1366 0);
1367 __ ldrb(rscratch1, data_addr);
1368 __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1369 __ strb(rscratch1, data_addr);
1370 __ b(*obj_is_null);
1371 __ bind(not_null);
1372 } else {
1373 __ cbz(obj, *obj_is_null);
1374 }
1375 }
1376
1377 if (!k->is_loaded()) {
1378 klass2reg_with_patching(k_RInfo, op->info_for_patch());
1379 } else {
1380 __ mov_metadata(k_RInfo, k->constant_encoding());
1381 }
1382 __ verify_oop(obj);
1383
1384 if (op->fast_check()) {
1385 // get object class
1386 // not a safepoint as obj null check happens earlier
1387 __ load_klass(rscratch1, obj);
1388 __ cmp( rscratch1, k_RInfo);
1389
1390 __ br(Assembler::NE, *failure_target);
1391 // successful cast, fall through to profile or jump
1392 } else {
1393 // get object class
1394 // not a safepoint as obj null check happens earlier
1395 __ load_klass(klass_RInfo, obj);
1544 __ bind(success);
1545 if (dst != obj) {
1546 __ mov(dst, obj);
1547 }
1548 } else if (code == lir_instanceof) {
1549 Register obj = op->object()->as_register();
1550 Register dst = op->result_opr()->as_register();
1551 Label success, failure, done;
1552 emit_typecheck_helper(op, &success, &failure, &failure);
1553 __ bind(failure);
1554 __ mov(dst, zr);
1555 __ b(done);
1556 __ bind(success);
1557 __ mov(dst, 1);
1558 __ bind(done);
1559 } else {
1560 ShouldNotReachHere();
1561 }
1562 }
1563
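// The two array checks below use one of two strategies: with
// UseArrayMarkWordCheck the flat / null-free properties are encoded in the
// object's mark word and tested directly; otherwise the klass is loaded and
// the corresponding layout_helper bits are tested.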
1564 void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
1565 // We are loading/storing from/to an array that *may* be flattened (the
1566 // declared type is Object[], abstract[], interface[] or VT.ref[]).
1567 // If this array is flattened, take the slow path.
1568
1569 Register klass = op->tmp()->as_register();
1570 if (UseArrayMarkWordCheck) {
1571 __ test_flattened_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1572 } else {
1573 __ load_klass(klass, op->array()->as_register());
1574 __ ldrw(klass, Address(klass, Klass::layout_helper_offset()));
1575 __ tst(klass, Klass::_lh_array_tag_flat_value_bit_inplace);
1576 __ br(Assembler::NE, *op->stub()->entry());
1577 }
1578 if (!op->value()->is_illegal()) {
1579 // The array is not flattened, but it might be null-free. If we are storing
1580 // a null into a null-free array, take the slow path (which will throw NPE).
1581 Label skip;
1582 __ cbnz(op->value()->as_register(), skip);
1583 if (UseArrayMarkWordCheck) {
1584 __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1585 } else {
1586 __ tst(klass, Klass::_lh_null_free_array_bit_inplace);
1587 __ br(Assembler::NE, *op->stub()->entry());
1588 }
1589 __ bind(skip);
1590 }
1591 }
1592
1593 void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
1594 // We are storing into an array that *may* be null-free (the declared type is
1595 // Object[], abstract[], interface[] or VT.ref[]).
1596 if (UseArrayMarkWordCheck) {
1597 Label test_mark_word;
1598 Register tmp = op->tmp()->as_register();
1599 __ ldr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
1600 __ tst(tmp, markWord::unlocked_value);
1601 __ br(Assembler::NE, test_mark_word);
1602 __ load_prototype_header(tmp, op->array()->as_register());
1603 __ bind(test_mark_word);
1604 __ tst(tmp, markWord::null_free_array_bit_in_place);
1605 } else {
1606 Register klass = op->tmp()->as_register();
1607 __ load_klass(klass, op->array()->as_register());
1608 __ ldrw(klass, Address(klass, Klass::layout_helper_offset())); // layout_helper is a jint
1609 __ tst(klass, Klass::_lh_null_free_array_bit_inplace);
1610 }
1611 }
1612
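// Fast-path summary: reference-equal operands are substitutable; a null on
// either side, a non-inline-type mark word, or differing klasses settle "not
// substitutable" inline. Only same-klass inline types fall into the slow stub,
// which compares field values and reports the answer in r0 (0 == not
// substitutable).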
1613 void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
1614 Label L_oops_equal;
1615 Label L_oops_not_equal;
1616 Label L_end;
1617
1618 Register left = op->left()->as_register();
1619 Register right = op->right()->as_register();
1620
1621 __ cmp(left, right);
1622 __ br(Assembler::EQ, L_oops_equal);
1623
1624 // (1) Null check -- if one of the operands is null, the other must not be null (because
1625 // the two references are not equal), so they are not substitutable.
1626 // FIXME: do null check only if the operand is nullable
1627 {
1628 __ cbz(left, L_oops_not_equal);
1629 __ cbz(right, L_oops_not_equal);
1630 }
1631
1632 ciKlass* left_klass = op->left_klass();
1633 ciKlass* right_klass = op->right_klass();
1634
1635 // (2) Inline type check -- if either of the operands is not an inline type,
1636 // they are not substitutable. We do this only if we are not sure that the
1637 // operands are inline types
1638 if ((left_klass == NULL || right_klass == NULL) || // The klass is still unloaded, or came from a Phi node.
1639 !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
1640 Register tmp1 = op->tmp1()->as_register();
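// AND the pattern against both mark words: the inline_type_pattern bits
// survive only if they are set in both, so a single compare below verifies
// that both operands are inline types.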
1641 __ mov(tmp1, markWord::inline_type_pattern);
1642 __ ldr(rscratch1, Address(left, oopDesc::mark_offset_in_bytes()));
1643 __ andr(tmp1, tmp1, rscratch1);
1644 __ ldr(rscratch1, Address(right, oopDesc::mark_offset_in_bytes()));
1645 __ andr(tmp1, tmp1, rscratch1);
1646 __ cmp(tmp1, (u1)markWord::inline_type_pattern);
1647 __ br(Assembler::NE, L_oops_not_equal);
1648 }
1649
1650 // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
1651 if (left_klass != NULL && left_klass->is_inlinetype() && left_klass == right_klass) {
1652 // No need to load klass -- the operands are statically known to be the same inline klass.
1653 __ b(*op->stub()->entry());
1654 } else {
1655 Register left_klass_op = op->left_klass_op()->as_register();
1656 Register right_klass_op = op->right_klass_op()->as_register();
1657
1658 if (UseCompressedClassPointers) {
1659 __ ldrw(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes()));
1660 __ ldrw(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
1661 __ cmpw(left_klass_op, right_klass_op);
1662 } else {
1663 __ ldr(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes()));
1664 __ ldr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
1665 __ cmp(left_klass_op, right_klass_op);
1666 }
1667
1668 __ br(Assembler::EQ, *op->stub()->entry()); // same klass -> do slow check
1669 // fall through to L_oops_not_equal
1670 }
1671
1672 __ bind(L_oops_not_equal);
1673 move(op->not_equal_result(), op->result_opr());
1674 __ b(L_end);
1675
1676 __ bind(L_oops_equal);
1677 move(op->equal_result(), op->result_opr());
1678 __ b(L_end);
1679
1680 // We've returned from the stub. R0 contains 0x0 IFF the two
1681 // operands are not substitutable. (Don't compare against 0x1 in case the
1682 // C compiler is naughty)
1683 __ bind(*op->stub()->continuation());
1684 __ cbz(r0, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
1685 move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
1686 // fall-through
1687 __ bind(L_end);
1688 }
1689
1690
1691 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1692 __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1693 __ cset(rscratch1, Assembler::NE);
1694 __ membar(__ AnyAny);
1695 }
1696
1697 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1698 __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1699 __ cset(rscratch1, Assembler::NE);
1700 __ membar(__ AnyAny);
1701 }
1702
1703
1704 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1705 assert(VM_Version::supports_cx8(), "wrong machine");
1706 Register addr;
1707 if (op->addr()->is_register()) {
1708 addr = as_reg(op->addr());
1709 } else {
1710 assert(op->addr()->is_address(), "what else?");
  }

  if (opr2->is_constant()) {
    bool is_32bit = false; // width of register operand
    jlong imm;

    switch(opr2->type()) {
    case T_INT:
      imm = opr2->as_constant_ptr()->as_jint();
      is_32bit = true;
      break;
    case T_LONG:
      imm = opr2->as_constant_ptr()->as_jlong();
      break;
    case T_ADDRESS:
      imm = opr2->as_constant_ptr()->as_jint();
      break;
    case T_METADATA:
      imm = (intptr_t)(opr2->as_constant_ptr()->as_metadata());
      break;
    case T_PRIMITIVE_OBJECT:
    case T_OBJECT:
    case T_ARRAY:
      jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
      __ cmpoop(reg1, rscratch1);
      return;
    default:
      ShouldNotReachHere();
      imm = 0; // unreachable
      break;
    }

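    // AArch64 add/sub immediates are 12 bits wide, optionally shifted left by
    // 12; any constant outside that range has to be moved into a scratch
    // register before the compare.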
    if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
      if (is_32bit)
        __ cmpw(reg1, imm);
      else
        __ subs(zr, reg1, imm);
      return;
    } else {
      __ mov(rscratch1, imm);
      if (is_32bit)
    __ cmp(left->as_register_lo(), right->as_register_lo());
    __ mov(dst->as_register(), (uint64_t)-1L);
    __ br(Assembler::LT, done);
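    // csinc(dst, zr, zr, EQ) yields zr (0) when the compare was equal and
    // zr + 1 (1) otherwise, completing the -1/0/1 result of lcmp.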
    __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code code) { }


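// Java calls go through a trampoline so the callee can live anywhere in the
// address space; trampoline_call() returns NULL when the stub section is
// exhausted, which we surface as a bailout instead of emitting a broken call.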
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  address call = __ trampoline_call(Address(op->addr(), rtype));
  if (call == NULL) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
  __ post_call_nop();
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  address call = __ ic_call(op->addr());
  if (call == NULL) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
  __ post_call_nop();
}

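// The static call stub is the patching target for a static or opt-virtual
// call site: static_stub_Relocation ties the stub back to the call so the
// runtime can later install the resolved callee's metadata and entry point.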
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  __ relocate(static_stub_Relocation::spec(call_pc));
  __ emit_static_call_stub();

  assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size()
         <= call_stub_size(), "stub too big");
  __ end_a_stub();
}
  __ b(_unwind_handler_entry);
}


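// Shifts by a register amount. The AArch64 lslv/asrv/lsrv instructions take
// the shift count modulo the register width (32 or 64), which is exactly
// Java's masking of shift distances, so no explicit AND of the count is
// needed here.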
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
  Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();

  switch (left->type()) {
  case T_INT:
    switch (code) {
    case lir_shl:  __ lslvw(dreg, lreg, count->as_register()); break;
    case lir_shr:  __ asrvw(dreg, lreg, count->as_register()); break;
    case lir_ushr: __ lsrvw(dreg, lreg, count->as_register()); break;
    default:
      ShouldNotReachHere();
      break;
    }
    break;
  case T_LONG:
  case T_PRIMITIVE_OBJECT:
  case T_ADDRESS:
  case T_OBJECT:
    switch (code) {
    case lir_shl:  __ lslv(dreg, lreg, count->as_register()); break;
    case lir_shr:  __ asrv(dreg, lreg, count->as_register()); break;
    case lir_ushr: __ lsrv(dreg, lreg, count->as_register()); break;
    default:
      ShouldNotReachHere();
      break;
    }
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
  Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();

  switch (left->type()) {
  case T_INT:
    switch (code) {
    case lir_shl:  __ lslw(dreg, lreg, count); break;
    case lir_shr:  __ asrw(dreg, lreg, count); break;
    case lir_ushr: __ lsrw(dreg, lreg, count); break;
    default:
      ShouldNotReachHere();
      break;
    }
    break;
  case T_LONG:
  case T_ADDRESS:
  case T_PRIMITIVE_OBJECT:
  case T_OBJECT:
    switch (code) {
    case lir_shl:  __ lsl(dreg, lreg, count); break;
    case lir_shr:  __ asr(dreg, lreg, count); break;
    case lir_ushr: __ lsr(dreg, lreg, count); break;
    default:
      ShouldNotReachHere();
      break;
    }
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}


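// store_parameter writes an outgoing stub argument into the reserved
// argument area at the bottom of the current frame, from where the runtime
// stub picks it up.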
void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ str (r, Address(sp, offset_from_rsp_in_bytes));
}


void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ mov (rscratch1, c);
  __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
}


void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
  ShouldNotReachHere();
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ lea(rscratch1, __ constant_oop_address(o));
  __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
}

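// Route an arraycopy operand to the slow path if it is (or might be) a flat
// or null-free inline-type array: a flat source needs element-wise copying,
// and a null-free destination must reject null elements from the source.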
void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) {
  if (null_check) {
    __ cbz(obj, *slow_path->entry());
  }
  if (UseArrayMarkWordCheck) {
    if (is_dest) {
      __ test_null_free_array_oop(obj, tmp, *slow_path->entry());
    } else {
      __ test_flattened_array_oop(obj, tmp, *slow_path->entry());
    }
  } else {
    __ load_klass(tmp, obj);
    __ ldr(tmp, Address(tmp, Klass::layout_helper_offset()));
    if (is_dest) {
      // Take the slow path if it's a null-free destination array, in case the source array contains NULLs.
      __ tst(tmp, Klass::_lh_null_free_array_bit_inplace);
    } else {
      __ tst(tmp, Klass::_lh_array_tag_flat_value_bit_inplace);
    }
    __ br(Assembler::NE, *slow_path->entry());
  }
}

// This code replaces a call to arraycopy; no exceptions may be thrown in
// this code, they must be thrown in the System.arraycopy activation frame;
// we could save some checks if this were not the case
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = op->tmp()->as_register();

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (is_reference_type(basic_type)) basic_type = T_OBJECT;

  if (flags & LIR_OpArrayCopy::always_slow_path) {
    __ b(*stub->entry());
    __ bind(*stub->continuation());
    return;
  }

  // if we don't know anything, just go through the generic arraycopy
  if (default_type == NULL // || basic_type == T_OBJECT
      ) {
    Label done;
    assert(src == r1 && src_pos == r2, "mismatch in calling convention");

    // Save the arguments in case the generic arraycopy fails and we
    // have to fall back to the JNI stub
    __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
    __ stp(length, src_pos, Address(sp, 2*BytesPerWord));
    __ str(src, Address(sp, 4*BytesPerWord));

    address copyfunc_addr = StubRoutines::generic_arraycopy();
    assert(copyfunc_addr != NULL, "generic arraycopy stub required");

    // The arguments are in java calling convention so we shift them
    // to C convention
    assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg0, j_rarg0);
    assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ cbz(r0, *stub->continuation());

    // Reload values from the stack so they are where the stub
    // expects them.
    __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
    __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
    __ ldr(src, Address(sp, 4*BytesPerWord));

    // r0 is -1^K where K == partial copied count
    __ eonw(rscratch1, r0, zr);
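    // (eon with zr computes ~r0, so rscratch1 == K, the number of elements
    // already copied by the partial copy)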
    // adjust length down and src/end pos up by partial copied count
    __ subw(length, length, rscratch1);
    __ addw(src_pos, src_pos, rscratch1);
    __ addw(dst_pos, dst_pos, rscratch1);
    __ b(*stub->entry());

    __ bind(*stub->continuation());
    return;
  }

  // Handle inline type arrays
  if (flags & LIR_OpArrayCopy::src_inlinetype_check) {
    arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
  }

  if (flags & LIR_OpArrayCopy::dst_inlinetype_check) {
    arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
  }

  assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  int scale = exact_log2(elem_size);

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());

  // test for NULL
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ cbz(src, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ cbz(dst, *stub->entry());
  }

  // If the compiler was not able to prove that the exact type of the source or the destination
  // of the arraycopy is an array type, check at runtime if the source or the destination is
        // first time here. Set profile type.
        __ str(tmp, mdo_addr);
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        __ ldr(tmp, mdo_addr);
        __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.

        __ orr(tmp, tmp, TypeEntries::type_unknown);
        __ str(tmp, mdo_addr);
        // FIXME: Write barrier needed here?
      }
    }

    __ bind(next);
  }
  COMMENT("} emit_profile_type");
}

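// Record in the MDO that an inline type was observed at this bytecode: null
// and non-inline-type receivers fall through, otherwise the profile flag is
// ORed into the byte at mdp.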
void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  bool not_null = op->not_null();
  int flag = op->flag();

  Label not_inline_type;
  if (!not_null) {
    __ cbz(obj, not_inline_type);
  }

  __ test_oop_is_not_inline_type(obj, tmp, not_inline_type);

  Address mdo_addr = as_Address(op->mdp()->as_address_ptr(), rscratch2);
  __ ldrb(rscratch1, mdo_addr);
  __ orr(rscratch1, rscratch1, flag);
  __ strb(rscratch1, mdo_addr);

  __ bind(not_inline_type);
}

void LIR_Assembler::align_backward_branch_target() {
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
  // tmp must be unused
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");

  if (left->is_single_cpu()) {
    assert(dest->is_single_cpu(), "expect single result reg");
    __ negw(dest->as_register(), left->as_register());
  } else if (left->is_double_cpu()) {
    assert(dest->is_double_cpu(), "expect double result reg");
    __ neg(dest->as_register_lo(), left->as_register_lo());
  } else if (left->is_single_fpu()) {
    assert(dest->is_single_fpu(), "expect single float result reg");
    __ fnegs(dest->as_float_reg(), left->as_float_reg());
  } else {
    assert(left->is_double_fpu(), "expect double float operand reg");
void LIR_Assembler::membar_loadload() {
  __ membar(Assembler::LoadLoad);
}

void LIR_Assembler::membar_storestore() {
  __ membar(MacroAssembler::StoreStore);
}

void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }

void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }

void LIR_Assembler::on_spin_wait() {
  __ spin_wait();
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  __ mov(result_reg->as_register(), rthread);
}

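// The orig_pc slot in the frame is only non-null once deoptimization has
// patched this frame; callers branch on the flags set by the compare below.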
void LIR_Assembler::check_orig_pc() {
  __ ldr(rscratch2, frame_map()->address_for_orig_pc_addr());
  __ cmp(rscratch2, (u1)NULL_WORD);
}

void LIR_Assembler::peephole(LIR_List *lir) {
#if 0
  if (tableswitch_count >= max_tableswitches)
    return;

  /*
    This finite-state automaton recognizes sequences of compare-and-
    branch instructions. We will turn them into a tableswitch. You
    could argue that C1 really shouldn't be doing this sort of
    optimization, but without it the code is really horrible.
  */

  enum { start_s, cmp1_s, beq_s, cmp_s } state;
  int first_key, last_key = -2147483648;
  int next_key = 0;
  int start_insn = -1;
  int last_insn = -1;
  Register reg = noreg;
  LIR_Opr reg_opr;
#endif
}

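// Lock-free atomic xchg/xadd: select the 32- or 64-bit acquire/release form
// via a member-function pointer (compressed oops use the 32-bit form), then
// dispatch on the LIR opcode.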
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
  Address addr = as_Address(src->as_address_ptr());
  BasicType type = src->type();
  bool is_oop = is_reference_type(type);

  void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
  void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);

  switch(type) {
  case T_INT:
    xchg = &MacroAssembler::atomic_xchgalw;
    add = &MacroAssembler::atomic_addalw;
    break;
  case T_LONG:
    xchg = &MacroAssembler::atomic_xchgal;
    add = &MacroAssembler::atomic_addal;
    break;
  case T_PRIMITIVE_OBJECT:
  case T_OBJECT:
  case T_ARRAY:
    if (UseCompressedOops) {
      xchg = &MacroAssembler::atomic_xchgalw;
      add = &MacroAssembler::atomic_addalw;
    } else {
      xchg = &MacroAssembler::atomic_xchgal;
      add = &MacroAssembler::atomic_addal;
    }
    break;
  default:
    ShouldNotReachHere();
    xchg = &MacroAssembler::atomic_xchgal;
    add = &MacroAssembler::atomic_addal; // unreachable
  }

  switch (code) {
  case lir_xadd:
    {
      RegisterOrConstant inc;