
src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp


  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "asm/assembler.hpp"
  28 #include "c1/c1_CodeStubs.hpp"
  29 #include "c1/c1_Compilation.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_MacroAssembler.hpp"
  32 #include "c1/c1_Runtime1.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "ci/ciArrayKlass.hpp"
  35 #include "ci/ciInstance.hpp"
  36 #include "code/compiledIC.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "gc/shared/gc_globals.hpp"
  39 #include "nativeInst_aarch64.hpp"
  40 #include "oops/objArrayKlass.hpp"
  41 #include "runtime/frame.inline.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "runtime/stubRoutines.hpp"
  44 #include "utilities/powerOfTwo.hpp"
  45 #include "vmreg_aarch64.inline.hpp"
  46 
  47 
  48 #ifndef PRODUCT
  49 #define COMMENT(x)   do { __ block_comment(x); } while (0)
  50 #else
  51 #define COMMENT(x)
  52 #endif
  53 
  54 NEEDS_CLEANUP // remove these definitions?
  55 const Register SYNC_header = r0;   // synchronization header
  56 const Register SHIFT_count = r0;   // where count for shift operations must be
  57 
  58 #define __ _masm->
  59 
  60 

 409   MonitorExitStub* stub = nullptr;
 410   if (method()->is_synchronized()) {
 411     monitor_address(0, FrameMap::r0_opr);
 412     stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
 413     __ unlock_object(r5, r4, r0, r6, *stub->entry());
 414     __ bind(*stub->continuation());
 415   }
 416 
 417   if (compilation()->env()->dtrace_method_probes()) {
 418     __ mov(c_rarg0, rthread);
 419     __ mov_metadata(c_rarg1, method()->constant_encoding());
 420     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
 421   }
 422 
 423   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 424     __ mov(r0, r19);  // Restore the exception
 425   }
 426 
 427   // remove the activation and dispatch to the unwind handler
 428   __ block_comment("remove_frame and dispatch to the unwind handler");
 429   __ remove_frame(initial_frame_size_in_bytes());
 430   __ far_jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_unwind_exception_id)));
 431 
 432   // Emit the slow path assembly
 433   if (stub != nullptr) {
 434     stub->emit_code(this);
 435   }
 436 
 437   return offset;
 438 }
 439 
 440 
 441 int LIR_Assembler::emit_deopt_handler() {
 442   // generate code for deopt handler
 443   address handler_base = __ start_a_stub(deopt_handler_size());
 444   if (handler_base == nullptr) {
 445     // not enough space left for the handler
 446     bailout("deopt handler overflow");
 447     return -1;
 448   }
 449 

 453   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 454   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 455   __ end_a_stub();
 456 
 457   return offset;
 458 }
 459 
 460 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
 461   _masm->code_section()->relocate(adr, relocInfo::poll_type);
 462   int pc_offset = code_offset();
 463   flush_debug_info(pc_offset);
 464   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
 465   if (info->exception_handlers() != nullptr) {
 466     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
 467   }
 468 }
 469 
 470 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 471   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0");
 472 
 473   // Pop the stack before the safepoint code
 474   __ remove_frame(initial_frame_size_in_bytes());
 475 
 476   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 477     __ reserved_stack_check();
 478   }
 479 
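  // Return-path safepoint: record the poll offset in the stub, tag the
  // instruction with a poll_return relocation so the oop map can be found,
  // then test the thread's polling word and branch to the stub's slow path
  // if the poll is armed.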
 480   code_stub->set_safepoint_offset(__ offset());
 481   __ relocate(relocInfo::poll_return_type);
 482   __ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
 483   __ ret(lr);
 484 }
 485 
 486 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 487   guarantee(info != nullptr, "Shouldn't be null");
 488   __ get_polling_page(rscratch1, relocInfo::poll_type);
 489   add_debug_info_for_branch(info);  // This isn't just debug info:
 490                                     // it's the oop map
 491   __ read_polling_page(rscratch1, relocInfo::poll_type);
 492   return __ offset();
 493 }
 494 
 495 
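// LIR uses r31_sp to name the stack pointer; rewrite it to sp so that mov
// selects the sp-capable encoding (register 31 otherwise reads as zr).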
 496 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
 497   if (from_reg == r31_sp)
 498     from_reg = sp;
 499   if (to_reg == r31_sp)
 500     to_reg = sp;
 501   __ mov(to_reg, from_reg);
 502 }
 503 
 504 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
 505 

 512   switch (c->type()) {
 513     case T_INT: {
 514       assert(patch_code == lir_patch_none, "no patching handled here");
 515       __ movw(dest->as_register(), c->as_jint());
 516       break;
 517     }
 518 
 519     case T_ADDRESS: {
 520       assert(patch_code == lir_patch_none, "no patching handled here");
 521       __ mov(dest->as_register(), c->as_jint());
 522       break;
 523     }
 524 
 525     case T_LONG: {
 526       assert(patch_code == lir_patch_none, "no patching handled here");
 527       __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
 528       break;
 529     }
 530 
 531     case T_OBJECT: {
 532         if (patch_code == lir_patch_none) {
 533           jobject2reg(c->as_jobject(), dest->as_register());
 534         } else {
 535           jobject2reg_with_patching(dest->as_register(), info);
 536         }
 537       break;
 538     }
 539 
 540     case T_METADATA: {
 541       if (patch_code != lir_patch_none) {
 542         klass2reg_with_patching(dest->as_register(), info);
 543       } else {
 544         __ mov_metadata(dest->as_register(), c->as_metadata());
 545       }
 546       break;
 547     }
 548 
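    // Floats that fit the fmov immediate encoding are materialized directly;
    // others are loaded from a constant-table slot via an adr/ldrs pair.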
 549     case T_FLOAT: {
 550       if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
 551         __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
 552       } else {
 553         __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
 554         __ ldrs(dest->as_float_reg(), Address(rscratch1));
 555       }

 625   LIR_Const* c = src->as_constant_ptr();
 626   LIR_Address* to_addr = dest->as_address_ptr();
 627 
 628   void (Assembler::* insn)(Register Rt, const Address &adr);
 629 
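  // Select the store width via a member-function pointer; every case asserts
  // the constant is zero, so the eventual store can simply write zr.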
 630   switch (type) {
 631   case T_ADDRESS:
 632     assert(c->as_jint() == 0, "should be");
 633     insn = &Assembler::str;
 634     break;
 635   case T_LONG:
 636     assert(c->as_jlong() == 0, "should be");
 637     insn = &Assembler::str;
 638     break;
 639   case T_INT:
 640     assert(c->as_jint() == 0, "should be");
 641     insn = &Assembler::strw;
 642     break;
 643   case T_OBJECT:
 644   case T_ARRAY:
 645     assert(c->as_jobject() == nullptr, "should be");
 646     if (UseCompressedOops && !wide) {
 647       insn = &Assembler::strw;
 648     } else {
 649       insn = &Assembler::str;
 650     }
 651     break;
 652   case T_CHAR:
 653   case T_SHORT:
 654     assert(c->as_jint() == 0, "should be");
 655     insn = &Assembler::strh;
 656     break;
 657   case T_BOOLEAN:
 658   case T_BYTE:
 659     assert(c->as_jint() == 0, "should be");
 660     insn = &Assembler::strb;
 661     break;
 662   default:
 663     ShouldNotReachHere();
 664     insn = &Assembler::str;  // unreachable

 972     case T_CHAR:
 973       __ ldrh(dest->as_register(), as_Address(from_addr));
 974       break;
 975     case T_SHORT:
 976       __ ldrsh(dest->as_register(), as_Address(from_addr));
 977       break;
 978 
 979     default:
 980       ShouldNotReachHere();
 981   }
 982 
 983   if (is_reference_type(type)) {
 984     if (UseCompressedOops && !wide) {
 985       __ decode_heap_oop(dest->as_register());
 986     }
 987 
 988     __ verify_oop(dest->as_register());
 989   }
 990 }
 991 
 992 
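// Array element sizes are powers of two, so the log2 result can be used
// directly as the shift amount in scaled address arithmetic.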
 993 int LIR_Assembler::array_element_size(BasicType type) const {
 994   int elem_size = type2aelembytes(type);
 995   return exact_log2(elem_size);
 996 }
 997 
 998 
 999 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1000   switch (op->code()) {
1001   case lir_idiv:
1002   case lir_irem:
1003     arithmetic_idiv(op->code(),
1004                     op->in_opr1(),
1005                     op->in_opr2(),
1006                     op->in_opr3(),
1007                     op->result_opr(),
1008                     op->info());
1009     break;
1010   case lir_fmad:
1011     __ fmaddd(op->result_opr()->as_double_reg(),

1163     __ lea(rscratch1, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
1164     __ ldarb(rscratch1, rscratch1);
1165     __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1166     add_debug_info_for_null_check_here(op->stub()->info());
1167     __ br(Assembler::NE, *op->stub()->entry());
1168   }
1169   __ allocate_object(op->obj()->as_register(),
1170                      op->tmp1()->as_register(),
1171                      op->tmp2()->as_register(),
1172                      op->header_size(),
1173                      op->object_size(),
1174                      op->klass()->as_register(),
1175                      *op->stub()->entry());
1176   __ bind(*op->stub()->continuation());
1177 }
1178 
1179 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1180   Register len =  op->len()->as_register();
1181   __ uxtw(len, len);
1182 
1183   if (UseSlowPath ||
1184       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1185       (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
1186     __ b(*op->stub()->entry());
1187   } else {
1188     Register tmp1 = op->tmp1()->as_register();
1189     Register tmp2 = op->tmp2()->as_register();
1190     Register tmp3 = op->tmp3()->as_register();
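    // Keep tmp1/tmp2 distinct from len and make sure a copy of len ends up
    // in tmp3 (either by aliasing or an explicit move), so the length value
    // survives into the allocation path.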
1191     if (len == tmp1) {
1192       tmp1 = tmp3;
1193     } else if (len == tmp2) {
1194       tmp2 = tmp3;
1195     } else if (len == tmp3) {
1196       // everything is ok
1197     } else {
1198       __ mov(tmp3, len);
1199     }
1200     __ allocate_array(op->obj()->as_register(),
1201                       len,
1202                       tmp1,
1203                       tmp2,

1275     assert(data != nullptr,                "need data for type check");
1276     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1277   }
1278   Label* success_target = success;
1279   Label* failure_target = failure;
1280 
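  // obj must stay live across the klass loads below; if it aliases one of
  // the klass temps, redirect that temp to dst before selecting registers.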
1281   if (obj == k_RInfo) {
1282     k_RInfo = dst;
1283   } else if (obj == klass_RInfo) {
1284     klass_RInfo = dst;
1285   }
1286   if (k->is_loaded() && !UseCompressedClassPointers) {
1287     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1288   } else {
1289     Rtmp1 = op->tmp3()->as_register();
1290     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1291   }
1292 
1293   assert_different_registers(obj, k_RInfo, klass_RInfo);
1294 
1295   if (should_profile) {
1296     Register mdo  = klass_RInfo;
1297     __ mov_metadata(mdo, md->constant_encoding());
1298     Label not_null;
1299     __ cbnz(obj, not_null);
1300     // Object is null; update MDO and exit
1301     Address data_addr
1302       = __ form_address(rscratch2, mdo,
1303                         md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1304                         0);
1305     __ ldrb(rscratch1, data_addr);
1306     __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1307     __ strb(rscratch1, data_addr);
1308     __ b(*obj_is_null);
1309     __ bind(not_null);
1310 
1311     Label update_done;
1312     Register recv = k_RInfo;
1313     __ load_klass(recv, obj);
1314     type_profile_helper(mdo, md, data, recv, &update_done);
1315     Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1316     __ addptr(counter_addr, DataLayout::counter_increment);
1317 
1318     __ bind(update_done);
1319   } else {
1320     __ cbz(obj, *obj_is_null);
1321   }
1322 
1323   if (!k->is_loaded()) {
1324     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1325   } else {
1326     __ mov_metadata(k_RInfo, k->constant_encoding());
1327   }
1328   __ verify_oop(obj);
1329 
1330   if (op->fast_check()) {
1331     // get object class
1332     // not a safepoint as obj null check happens earlier
1333     __ load_klass(rscratch1, obj);
 1334     __ cmp(rscratch1, k_RInfo);
1335 
1336     __ br(Assembler::NE, *failure_target);
1337     // successful cast, fall through to profile or jump
1338   } else {
1339     // get object class
1340     // not a safepoint as obj null check happens earlier

1458     __ bind(success);
1459     if (dst != obj) {
1460       __ mov(dst, obj);
1461     }
1462   } else if (code == lir_instanceof) {
1463     Register obj = op->object()->as_register();
1464     Register dst = op->result_opr()->as_register();
1465     Label success, failure, done;
1466     emit_typecheck_helper(op, &success, &failure, &failure);
1467     __ bind(failure);
1468     __ mov(dst, zr);
1469     __ b(done);
1470     __ bind(success);
1471     __ mov(dst, 1);
1472     __ bind(done);
1473   } else {
1474     ShouldNotReachHere();
1475   }
1476 }
1477 
1478 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1479   __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1480   __ cset(rscratch1, Assembler::NE);
1481   __ membar(__ AnyAny);
1482 }
1483 
1484 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1485   __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1486   __ cset(rscratch1, Assembler::NE);
1487   __ membar(__ AnyAny);
1488 }
1489 
1490 
1491 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1492   Register addr;
1493   if (op->addr()->is_register()) {
1494     addr = as_reg(op->addr());
1495   } else {
1496     assert(op->addr()->is_address(), "what else?");
1497     LIR_Address* addr_ptr = op->addr()->as_address_ptr();

1971     __ cmp(left->as_register_lo(), right->as_register_lo());
1972     __ mov(dst->as_register(), (uint64_t)-1L);
1973     __ br(Assembler::LT, done);
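    // Three-way compare: dst is preset to -1 for the LT path; csinc then
    // yields zr (0) on EQ and zr + 1 (1) otherwise (GT).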
1974     __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
1975     __ bind(done);
1976   } else {
1977     ShouldNotReachHere();
1978   }
1979 }
1980 
1981 
1982 void LIR_Assembler::align_call(LIR_Code code) {  }
1983 
1984 
1985 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
1986   address call = __ trampoline_call(Address(op->addr(), rtype));
1987   if (call == nullptr) {
1988     bailout("trampoline stub overflow");
1989     return;
1990   }
1991   add_call_info(code_offset(), op->info());
1992   __ post_call_nop();
1993 }
1994 
1995 
1996 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
1997   address call = __ ic_call(op->addr());
1998   if (call == nullptr) {
1999     bailout("trampoline stub overflow");
2000     return;
2001   }
2002   add_call_info(code_offset(), op->info());
2003   __ post_call_nop();
2004 }
2005 
2006 void LIR_Assembler::emit_static_call_stub() {
2007   address call_pc = __ pc();
2008   address stub = __ start_a_stub(call_stub_size());
2009   if (stub == nullptr) {
2010     bailout("static call stub overflow");
2011     return;
2012   }
2013 
2014   int start = __ offset();
2015 
2016   __ relocate(static_stub_Relocation::spec(call_pc));
2017   __ emit_static_call_stub();
2018 
2019   assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
2020         <= call_stub_size(), "stub too big");
2021   __ end_a_stub();
2022 }

2145 
2146 
2147 void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2148   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2149   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2150   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2151   __ mov (rscratch1, c);
2152   __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2153 }
2154 
2155 
2156 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
2157   ShouldNotReachHere();
2158   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2159   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2160   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2161   __ lea(rscratch1, __ constant_oop_address(o));
2162   __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2163 }
2164 
2165 
 2166 // This code replaces a call to arraycopy; no exceptions may
 2167 // be thrown in this code; they must be thrown in the System.arraycopy
 2168 // activation frame. We could save some checks if this were not the case.
2169 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2170   ciArrayKlass* default_type = op->expected_type();
2171   Register src = op->src()->as_register();
2172   Register dst = op->dst()->as_register();
2173   Register src_pos = op->src_pos()->as_register();
2174   Register dst_pos = op->dst_pos()->as_register();
2175   Register length  = op->length()->as_register();
2176   Register tmp = op->tmp()->as_register();
2177 
2178   CodeStub* stub = op->stub();
2179   int flags = op->flags();
2180   BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
2181   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2182 
2183   // if we don't know anything, just go through the generic arraycopy
2184   if (default_type == nullptr // || basic_type == T_OBJECT
2185       ) {
2186     Label done;
2187     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2188 
2189     // Save the arguments in case the generic arraycopy fails and we
2190     // have to fall back to the JNI stub
2191     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2192     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2193     __ str(src,              Address(sp, 4*BytesPerWord));
2194 
2195     address copyfunc_addr = StubRoutines::generic_arraycopy();
2196     assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2197 
2198     // The arguments are in java calling convention so we shift them
2199     // to C convention
2200     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2201     __ mov(c_rarg0, j_rarg0);
2202     assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);

2216     __ cbz(r0, *stub->continuation());
2217 
2218     // Reload values from the stack so they are where the stub
2219     // expects them.
2220     __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2221     __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2222     __ ldr(src,              Address(sp, 4*BytesPerWord));
2223 
 2224     // r0 is -1^K (i.e. ~K) where K == partial copied count
 2225     __ eonw(rscratch1, r0, zr);   // rscratch1 = ~r0 = K
 2226     // adjust length down and src/dst pos up by the partial copied count
2227     __ subw(length, length, rscratch1);
2228     __ addw(src_pos, src_pos, rscratch1);
2229     __ addw(dst_pos, dst_pos, rscratch1);
2230     __ b(*stub->entry());
2231 
2232     __ bind(*stub->continuation());
2233     return;
2234   }
2235 
2236   assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2237 
2238   int elem_size = type2aelembytes(basic_type);
2239   int scale = exact_log2(elem_size);
2240 
2241   Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2242   Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2243 
2244   // test for null
2245   if (flags & LIR_OpArrayCopy::src_null_check) {
2246     __ cbz(src, *stub->entry());
2247   }
2248   if (flags & LIR_OpArrayCopy::dst_null_check) {
2249     __ cbz(dst, *stub->entry());
2250   }
2251 
 2252   // If the compiler was not able to prove that the exact type of the source or the destination
 2253   // of the arraycopy is an array type, check at runtime whether the source or the destination is
 2254   // an instance type.
2255   if (flags & LIR_OpArrayCopy::type_check) {

2764         __ verify_klass_ptr(tmp);
2765 #endif
2766       } else {
2767         assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2768                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2769 
2770         __ ldr(tmp, mdo_addr);
2771         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2772 
2773         __ orr(tmp, tmp, TypeEntries::type_unknown);
2774         __ str(tmp, mdo_addr);
2775         // FIXME: Write barrier needed here?
2776       }
2777     }
2778 
2779     __ bind(next);
2780   }
2781   COMMENT("} emit_profile_type");
2782 }
2783 
2784 
2785 void LIR_Assembler::align_backward_branch_target() {
2786 }
2787 
2788 
2789 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2790   // tmp must be unused
2791   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2792 
2793   if (left->is_single_cpu()) {
2794     assert(dest->is_single_cpu(), "expect single result reg");
2795     __ negw(dest->as_register(), left->as_register());
2796   } else if (left->is_double_cpu()) {
2797     assert(dest->is_double_cpu(), "expect double result reg");
2798     __ neg(dest->as_register_lo(), left->as_register_lo());
2799   } else if (left->is_single_fpu()) {
2800     assert(dest->is_single_fpu(), "expect single float result reg");
2801     __ fnegs(dest->as_float_reg(), left->as_float_reg());
2802   } else {
2803     assert(left->is_double_fpu(), "expect double float operand reg");

2903 void LIR_Assembler::membar_loadload() {
2904   __ membar(Assembler::LoadLoad);
2905 }
2906 
2907 void LIR_Assembler::membar_storestore() {
2908   __ membar(MacroAssembler::StoreStore);
2909 }
2910 
2911 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
2912 
2913 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
2914 
2915 void LIR_Assembler::on_spin_wait() {
2916   __ spin_wait();
2917 }
2918 
2919 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
2920   __ mov(result_reg->as_register(), rthread);
2921 }
2922 
2923 
2924 void LIR_Assembler::peephole(LIR_List *lir) {
2925 #if 0
2926   if (tableswitch_count >= max_tableswitches)
2927     return;
2928 
2929   /*
2930     This finite-state automaton recognizes sequences of compare-and-
2931     branch instructions.  We will turn them into a tableswitch.  You
2932     could argue that C1 really shouldn't be doing this sort of
2933     optimization, but without it the code is really horrible.
2934   */
2935 
2936   enum { start_s, cmp1_s, beq_s, cmp_s } state;
2937   int first_key, last_key = -2147483648;
2938   int next_key = 0;
2939   int start_insn = -1;
2940   int last_insn = -1;
2941   Register reg = noreg;
2942   LIR_Opr reg_opr;

--- new version of src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp follows (old version above) ---

  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "asm/assembler.hpp"
  28 #include "c1/c1_CodeStubs.hpp"
  29 #include "c1/c1_Compilation.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_MacroAssembler.hpp"
  32 #include "c1/c1_Runtime1.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "ci/ciArrayKlass.hpp"
  35 #include "ci/ciInlineKlass.hpp"
  36 #include "ci/ciInstance.hpp"
  37 #include "code/compiledIC.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "gc/shared/gc_globals.hpp"
  40 #include "nativeInst_aarch64.hpp"
  41 #include "oops/objArrayKlass.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "runtime/frame.inline.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "runtime/stubRoutines.hpp"
  46 #include "utilities/powerOfTwo.hpp"
  47 #include "vmreg_aarch64.inline.hpp"
  48 
  49 
  50 #ifndef PRODUCT
  51 #define COMMENT(x)   do { __ block_comment(x); } while (0)
  52 #else
  53 #define COMMENT(x)
  54 #endif
  55 
  56 NEEDS_CLEANUP // remove these definitions?
  57 const Register SYNC_header = r0;   // synchronization header
  58 const Register SHIFT_count = r0;   // where count for shift operations must be
  59 
  60 #define __ _masm->
  61 
  62 

 411   MonitorExitStub* stub = nullptr;
 412   if (method()->is_synchronized()) {
 413     monitor_address(0, FrameMap::r0_opr);
 414     stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
 415     __ unlock_object(r5, r4, r0, r6, *stub->entry());
 416     __ bind(*stub->continuation());
 417   }
 418 
 419   if (compilation()->env()->dtrace_method_probes()) {
 420     __ mov(c_rarg0, rthread);
 421     __ mov_metadata(c_rarg1, method()->constant_encoding());
 422     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
 423   }
 424 
 425   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 426     __ mov(r0, r19);  // Restore the exception
 427   }
 428 
 429   // remove the activation and dispatch to the unwind handler
 430   __ block_comment("remove_frame and dispatch to the unwind handler");
 431   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 432   __ far_jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_unwind_exception_id)));
 433 
 434   // Emit the slow path assembly
 435   if (stub != nullptr) {
 436     stub->emit_code(this);
 437   }
 438 
 439   return offset;
 440 }
 441 
 442 
 443 int LIR_Assembler::emit_deopt_handler() {
 444   // generate code for deopt handler
 445   address handler_base = __ start_a_stub(deopt_handler_size());
 446   if (handler_base == nullptr) {
 447     // not enough space left for the handler
 448     bailout("deopt handler overflow");
 449     return -1;
 450   }
 451 

 455   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 456   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 457   __ end_a_stub();
 458 
 459   return offset;
 460 }
 461 
 462 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
 463   _masm->code_section()->relocate(adr, relocInfo::poll_type);
 464   int pc_offset = code_offset();
 465   flush_debug_info(pc_offset);
 466   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
 467   if (info->exception_handlers() != nullptr) {
 468     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
 469   }
 470 }
 471 
 472 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 473   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0");
 474 
 475   if (InlineTypeReturnedAsFields) {
 476     // Check if we are returning a non-null inline type and load its fields into registers
 477     ciType* return_type = compilation()->method()->return_type();
 478     if (return_type->is_inlinetype()) {
 479       ciInlineKlass* vk = return_type->as_inline_klass();
 480       if (vk->can_be_returned_as_fields()) {
 481         address unpack_handler = vk->unpack_handler();
 482         assert(unpack_handler != nullptr, "must be");
 483         __ far_call(RuntimeAddress(unpack_handler));
 484       }
 485     } else if (return_type->is_instance_klass() && (!return_type->is_loaded() || StressCallingConvention)) {
 486       Label skip;
 487       Label not_null;
 488       __ cbnz(r0, not_null);
 489       // Returned value is null, zero all return registers because they may belong to oop fields
 490       __ mov(j_rarg1, zr);
 491       __ mov(j_rarg2, zr);
 492       __ mov(j_rarg3, zr);
 493       __ mov(j_rarg4, zr);
 494       __ mov(j_rarg5, zr);
 495       __ mov(j_rarg6, zr);
 496       __ mov(j_rarg7, zr);
 497       __ b(skip);
 498       __ bind(not_null);
 499 
 500       // Check if we are returning a non-null inline type and load its fields into registers
 501       __ test_oop_is_not_inline_type(r0, rscratch2, skip, /* can_be_null= */ false);
 502 
 503       // Load fields from a buffered value with an inline class specific handler
 504       __ load_klass(rscratch1 /*dst*/, r0 /*src*/);
 505       __ ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 506       __ ldr(rscratch1, Address(rscratch1, InlineKlass::unpack_handler_offset()));
 507       // Unpack handler can be null if inline type is not scalarizable in returns
 508       __ cbz(rscratch1, skip);
 509       __ blr(rscratch1);
 510 
 511       __ bind(skip);
 512     }
 513     // At this point, r0 points to the value object (for interpreter or C1 caller).
 514     // The fields of the object are copied into registers (for C2 caller).
 515   }
 516 
 517   // Pop the stack before the safepoint code
 518   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 519 
 520   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 521     __ reserved_stack_check();
 522   }
 523 
 524   code_stub->set_safepoint_offset(__ offset());
 525   __ relocate(relocInfo::poll_return_type);
 526   __ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
 527   __ ret(lr);
 528 }
 529 
 530 int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
 531   return (__ store_inline_type_fields_to_buf(vk, false));
 532 }
 533 
 534 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 535   guarantee(info != nullptr, "Shouldn't be null");
 536   __ get_polling_page(rscratch1, relocInfo::poll_type);
 537   add_debug_info_for_branch(info);  // This isn't just debug info:
 538                                     // it's the oop map
 539   __ read_polling_page(rscratch1, relocInfo::poll_type);
 540   return __ offset();
 541 }
 542 
 543 
 544 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
 545   if (from_reg == r31_sp)
 546     from_reg = sp;
 547   if (to_reg == r31_sp)
 548     to_reg = sp;
 549   __ mov(to_reg, from_reg);
 550 }
 551 
 552 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
 553 

 560   switch (c->type()) {
 561     case T_INT: {
 562       assert(patch_code == lir_patch_none, "no patching handled here");
 563       __ movw(dest->as_register(), c->as_jint());
 564       break;
 565     }
 566 
 567     case T_ADDRESS: {
 568       assert(patch_code == lir_patch_none, "no patching handled here");
 569       __ mov(dest->as_register(), c->as_jint());
 570       break;
 571     }
 572 
 573     case T_LONG: {
 574       assert(patch_code == lir_patch_none, "no patching handled here");
 575       __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
 576       break;
 577     }
 578 
 579     case T_OBJECT: {
 580         if (patch_code != lir_patch_none) {
 581           jobject2reg_with_patching(dest->as_register(), info);
 582         } else {
 583           jobject2reg(c->as_jobject(), dest->as_register());
 584         }
 585       break;
 586     }
 587 
 588     case T_METADATA: {
 589       if (patch_code != lir_patch_none) {
 590         klass2reg_with_patching(dest->as_register(), info);
 591       } else {
 592         __ mov_metadata(dest->as_register(), c->as_metadata());
 593       }
 594       break;
 595     }
 596 
 597     case T_FLOAT: {
 598       if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
 599         __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
 600       } else {
 601         __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
 602         __ ldrs(dest->as_float_reg(), Address(rscratch1));
 603       }

 673   LIR_Const* c = src->as_constant_ptr();
 674   LIR_Address* to_addr = dest->as_address_ptr();
 675 
 676   void (Assembler::* insn)(Register Rt, const Address &adr);
 677 
 678   switch (type) {
 679   case T_ADDRESS:
 680     assert(c->as_jint() == 0, "should be");
 681     insn = &Assembler::str;
 682     break;
 683   case T_LONG:
 684     assert(c->as_jlong() == 0, "should be");
 685     insn = &Assembler::str;
 686     break;
 687   case T_INT:
 688     assert(c->as_jint() == 0, "should be");
 689     insn = &Assembler::strw;
 690     break;
 691   case T_OBJECT:
 692   case T_ARRAY:
 693     // Non-null case is not handled on aarch64 but is handled on x86
 694     // FIXME: do we need to add it here?
 695     assert(c->as_jobject() == nullptr, "should be");
 696     if (UseCompressedOops && !wide) {
 697       insn = &Assembler::strw;
 698     } else {
 699       insn = &Assembler::str;
 700     }
 701     break;
 702   case T_CHAR:
 703   case T_SHORT:
 704     assert(c->as_jint() == 0, "should be");
 705     insn = &Assembler::strh;
 706     break;
 707   case T_BOOLEAN:
 708   case T_BYTE:
 709     assert(c->as_jint() == 0, "should be");
 710     insn = &Assembler::strb;
 711     break;
 712   default:
 713     ShouldNotReachHere();
 714     insn = &Assembler::str;  // unreachable

1022     case T_CHAR:
1023       __ ldrh(dest->as_register(), as_Address(from_addr));
1024       break;
1025     case T_SHORT:
1026       __ ldrsh(dest->as_register(), as_Address(from_addr));
1027       break;
1028 
1029     default:
1030       ShouldNotReachHere();
1031   }
1032 
1033   if (is_reference_type(type)) {
1034     if (UseCompressedOops && !wide) {
1035       __ decode_heap_oop(dest->as_register());
1036     }
1037 
1038     __ verify_oop(dest->as_register());
1039   }
1040 }
1041 
1042 void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
1043   assert(dst->is_cpu_register(), "must be");
1044   assert(dst->type() == src->type(), "must be");
1045 
1046   if (src->is_cpu_register()) {
1047     reg2reg(src, dst);
1048   } else if (src->is_stack()) {
1049     stack2reg(src, dst, dst->type());
1050   } else if (src->is_constant()) {
1051     const2reg(src, dst, lir_patch_none, nullptr);
1052   } else {
1053     ShouldNotReachHere();
1054   }
1055 }
1056 
1057 int LIR_Assembler::array_element_size(BasicType type) const {
1058   int elem_size = type2aelembytes(type);
1059   return exact_log2(elem_size);
1060 }
1061 
1062 
1063 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1064   switch (op->code()) {
1065   case lir_idiv:
1066   case lir_irem:
1067     arithmetic_idiv(op->code(),
1068                     op->in_opr1(),
1069                     op->in_opr2(),
1070                     op->in_opr3(),
1071                     op->result_opr(),
1072                     op->info());
1073     break;
1074   case lir_fmad:
1075     __ fmaddd(op->result_opr()->as_double_reg(),

1227     __ lea(rscratch1, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
1228     __ ldarb(rscratch1, rscratch1);
1229     __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1230     add_debug_info_for_null_check_here(op->stub()->info());
1231     __ br(Assembler::NE, *op->stub()->entry());
1232   }
1233   __ allocate_object(op->obj()->as_register(),
1234                      op->tmp1()->as_register(),
1235                      op->tmp2()->as_register(),
1236                      op->header_size(),
1237                      op->object_size(),
1238                      op->klass()->as_register(),
1239                      *op->stub()->entry());
1240   __ bind(*op->stub()->continuation());
1241 }
1242 
1243 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1244   Register len =  op->len()->as_register();
1245   __ uxtw(len, len);
1246 
1247   if (UseSlowPath || op->always_slow_path() ||
1248       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1249       (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
1250     __ b(*op->stub()->entry());
1251   } else {
1252     Register tmp1 = op->tmp1()->as_register();
1253     Register tmp2 = op->tmp2()->as_register();
1254     Register tmp3 = op->tmp3()->as_register();
1255     if (len == tmp1) {
1256       tmp1 = tmp3;
1257     } else if (len == tmp2) {
1258       tmp2 = tmp3;
1259     } else if (len == tmp3) {
1260       // everything is ok
1261     } else {
1262       __ mov(tmp3, len);
1263     }
1264     __ allocate_array(op->obj()->as_register(),
1265                       len,
1266                       tmp1,
1267                       tmp2,

1339     assert(data != nullptr,                "need data for type check");
1340     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1341   }
1342   Label* success_target = success;
1343   Label* failure_target = failure;
1344 
1345   if (obj == k_RInfo) {
1346     k_RInfo = dst;
1347   } else if (obj == klass_RInfo) {
1348     klass_RInfo = dst;
1349   }
1350   if (k->is_loaded() && !UseCompressedClassPointers) {
1351     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1352   } else {
1353     Rtmp1 = op->tmp3()->as_register();
1354     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1355   }
1356 
1357   assert_different_registers(obj, k_RInfo, klass_RInfo);
1358 
1359   if (op->need_null_check()) {
1360     if (should_profile) {
1361       Register mdo  = klass_RInfo;
1362       __ mov_metadata(mdo, md->constant_encoding());
1363       Label not_null;
1364       __ cbnz(obj, not_null);
1365       // Object is null; update MDO and exit
1366       Address data_addr
1367         = __ form_address(rscratch2, mdo,
1368                           md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1369                           0);
1370       __ ldrb(rscratch1, data_addr);
1371       __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1372       __ strb(rscratch1, data_addr);
1373       __ b(*obj_is_null);
1374       __ bind(not_null);
1375 
1376       Label update_done;
1377       Register recv = k_RInfo;
1378       __ load_klass(recv, obj);
1379       type_profile_helper(mdo, md, data, recv, &update_done);
1380       Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1381       __ addptr(counter_addr, DataLayout::counter_increment);
1382 
1383       __ bind(update_done);
1384     } else {
1385       __ cbz(obj, *obj_is_null);
1386     }
1387   }
1388 
1389   if (!k->is_loaded()) {
1390     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1391   } else {
1392     __ mov_metadata(k_RInfo, k->constant_encoding());
1393   }
1394   __ verify_oop(obj);
1395 
1396   if (op->fast_check()) {
1397     // get object class
1398     // not a safepoint as obj null check happens earlier
1399     __ load_klass(rscratch1, obj);
 1400     __ cmp(rscratch1, k_RInfo);
1401 
1402     __ br(Assembler::NE, *failure_target);
1403     // successful cast, fall through to profile or jump
1404   } else {
1405     // get object class
1406     // not a safepoint as obj null check happens earlier

1524     __ bind(success);
1525     if (dst != obj) {
1526       __ mov(dst, obj);
1527     }
1528   } else if (code == lir_instanceof) {
1529     Register obj = op->object()->as_register();
1530     Register dst = op->result_opr()->as_register();
1531     Label success, failure, done;
1532     emit_typecheck_helper(op, &success, &failure, &failure);
1533     __ bind(failure);
1534     __ mov(dst, zr);
1535     __ b(done);
1536     __ bind(success);
1537     __ mov(dst, 1);
1538     __ bind(done);
1539   } else {
1540     ShouldNotReachHere();
1541   }
1542 }
1543 
1544 void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
1545   // We are loading/storing from/to an array that *may* be a flat array (the
1546   // declared type is Object[], abstract[], interface[] or VT.ref[]).
1547   // If this array is a flat array, take the slow path.
1548   __ test_flat_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1549   if (!op->value()->is_illegal()) {
1550     // The array is not a flat array, but it might be null-free. If we are storing
1551     // a null into a null-free array, take the slow path (which will throw NPE).
1552     Label skip;
1553     __ cbnz(op->value()->as_register(), skip);
1554     __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1555     __ bind(skip);
1556   }
1557 }
1558 
1559 void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
1560   // We are storing into an array that *may* be null-free (the declared type is
1561   // Object[], abstract[], interface[] or VT.ref[]).
1562   Label test_mark_word;
1563   Register tmp = op->tmp()->as_register();
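  // If the mark word is unlocked its null-free bit is directly testable;
  // otherwise fall back to the klass's prototype header before testing.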
1564   __ ldr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
1565   __ tst(tmp, markWord::unlocked_value);
1566   __ br(Assembler::NE, test_mark_word);
1567   __ load_prototype_header(tmp, op->array()->as_register());
1568   __ bind(test_mark_word);
1569   __ tst(tmp, markWord::null_free_array_bit_in_place);
1570 }
1571 
1572 void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
1573   Label L_oops_equal;
1574   Label L_oops_not_equal;
1575   Label L_end;
1576 
1577   Register left  = op->left()->as_register();
1578   Register right = op->right()->as_register();
1579 
1580   __ cmp(left, right);
1581   __ br(Assembler::EQ, L_oops_equal);
1582 
1583   // (1) Null check -- if one of the operands is null, the other must not be null (because
 1584   //     the two references are not equal), so they are not substitutable.
1585   //     FIXME: do null check only if the operand is nullable
1586   {
1587     __ cbz(left, L_oops_not_equal);
1588     __ cbz(right, L_oops_not_equal);
1589   }
1590 
1591   ciKlass* left_klass = op->left_klass();
1592   ciKlass* right_klass = op->right_klass();
1593 
 1594   // (2) Inline type check -- if either of the operands is not an inline type,
 1595   //     they are not substitutable. We do this only if we are not sure that the
 1596   //     operands are inline types.
 1597   if ((left_klass == nullptr || right_klass == nullptr) || // The klass is still unloaded, or came from a Phi node.
1598       !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
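    // AND the inline-type bit pattern with both mark words; the compare
    // below succeeds only if every pattern bit is set in both operands.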
1599     Register tmp1  = op->tmp1()->as_register();
1600     __ mov(tmp1, markWord::inline_type_pattern);
1601     __ ldr(rscratch1, Address(left, oopDesc::mark_offset_in_bytes()));
1602     __ andr(tmp1, tmp1, rscratch1);
1603     __ ldr(rscratch1, Address(right, oopDesc::mark_offset_in_bytes()));
1604     __ andr(tmp1, tmp1, rscratch1);
1605     __ cmp(tmp1, (u1)markWord::inline_type_pattern);
1606     __ br(Assembler::NE, L_oops_not_equal);
1607   }
1608 
1609   // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
1610   if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) {
1611     // No need to load klass -- the operands are statically known to be the same inline klass.
1612     __ b(*op->stub()->entry());
1613   } else {
1614     Register left_klass_op = op->left_klass_op()->as_register();
1615     Register right_klass_op = op->right_klass_op()->as_register();
1616 
1617     if (UseCompressedClassPointers) {
1618       __ ldrw(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
1619       __ ldrw(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
1620       __ cmpw(left_klass_op, right_klass_op);
1621     } else {
1622       __ ldr(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
1623       __ ldr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
1624       __ cmp(left_klass_op, right_klass_op);
1625     }
1626 
1627     __ br(Assembler::EQ, *op->stub()->entry()); // same klass -> do slow check
1628     // fall through to L_oops_not_equal
1629   }
1630 
1631   __ bind(L_oops_not_equal);
1632   move(op->not_equal_result(), op->result_opr());
1633   __ b(L_end);
1634 
1635   __ bind(L_oops_equal);
1636   move(op->equal_result(), op->result_opr());
1637   __ b(L_end);
1638 
1639   // We've returned from the stub. R0 contains 0x0 IFF the two
1640   // operands are not substitutable. (Don't compare against 0x1 in case the
1641   // C compiler is naughty)
1642   __ bind(*op->stub()->continuation());
1643   __ cbz(r0, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
1644   move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
1645   // fall-through
1646   __ bind(L_end);
1647 }
1648 
1649 
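// CAS helpers: cmpxchg sets the condition flags (EQ on success), cset
// materializes the failure bit in rscratch1, and the trailing AnyAny
// membar provides the full fence the LIR compare-and-swap contract expects.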
1650 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1651   __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1652   __ cset(rscratch1, Assembler::NE);
1653   __ membar(__ AnyAny);
1654 }
1655 
1656 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1657   __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1658   __ cset(rscratch1, Assembler::NE);
1659   __ membar(__ AnyAny);
1660 }
1661 
1662 
1663 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1664   Register addr;
1665   if (op->addr()->is_register()) {
1666     addr = as_reg(op->addr());
1667   } else {
1668     assert(op->addr()->is_address(), "what else?");
1669     LIR_Address* addr_ptr = op->addr()->as_address_ptr();

2143     __ cmp(left->as_register_lo(), right->as_register_lo());
2144     __ mov(dst->as_register(), (uint64_t)-1L);
2145     __ br(Assembler::LT, done);
2146     __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
2147     __ bind(done);
2148   } else {
2149     ShouldNotReachHere();
2150   }
2151 }
2152 
2153 
2154 void LIR_Assembler::align_call(LIR_Code code) {  }
2155 
2156 
2157 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2158   address call = __ trampoline_call(Address(op->addr(), rtype));
2159   if (call == nullptr) {
2160     bailout("trampoline stub overflow");
2161     return;
2162   }
2163   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
2164   __ post_call_nop();
2165 }
2166 
2167 
2168 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2169   address call = __ ic_call(op->addr());
2170   if (call == nullptr) {
2171     bailout("trampoline stub overflow");
2172     return;
2173   }
2174   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
2175   __ post_call_nop();
2176 }
2177 
2178 void LIR_Assembler::emit_static_call_stub() {
2179   address call_pc = __ pc();
2180   address stub = __ start_a_stub(call_stub_size());
2181   if (stub == nullptr) {
2182     bailout("static call stub overflow");
2183     return;
2184   }
2185 
2186   int start = __ offset();
2187 
2188   __ relocate(static_stub_Relocation::spec(call_pc));
2189   __ emit_static_call_stub();
2190 
2191   assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
2192         <= call_stub_size(), "stub too big");
2193   __ end_a_stub();
2194 }

2317 
2318 
2319 void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2320   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2321   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2322   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2323   __ mov (rscratch1, c);
2324   __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2325 }
2326 
2327 
2328 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
2329   ShouldNotReachHere();
2330   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2331   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2332   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2333   __ lea(rscratch1, __ constant_oop_address(o));
2334   __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2335 }
2336 
2337 void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) {
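  // A destination must be ruled out as both null-free and flat (see the
  // TODO below); a source only needs the flat-array check.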
2338   if (null_check) {
2339     __ cbz(obj, *slow_path->entry());
2340   }
2341   if (is_dest) {
2342     __ test_null_free_array_oop(obj, tmp, *slow_path->entry());
2343     // TODO 8350865 Flat no longer implies null-free, so we need to check for flat dest. Can we do better here?
2344     __ test_flat_array_oop(obj, tmp, *slow_path->entry());
2345   } else {
2346     __ test_flat_array_oop(obj, tmp, *slow_path->entry());
2347   }
2348 }
2349 
 2350 // This code replaces a call to arraycopy; no exceptions may
 2351 // be thrown in this code; they must be thrown in the System.arraycopy
 2352 // activation frame. We could save some checks if this were not the case.
2353 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2354   ciArrayKlass* default_type = op->expected_type();
2355   Register src = op->src()->as_register();
2356   Register dst = op->dst()->as_register();
2357   Register src_pos = op->src_pos()->as_register();
2358   Register dst_pos = op->dst_pos()->as_register();
2359   Register length  = op->length()->as_register();
2360   Register tmp = op->tmp()->as_register();
2361 
2362   CodeStub* stub = op->stub();
2363   int flags = op->flags();
2364   BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
2365   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2366 
2367   if (flags & LIR_OpArrayCopy::always_slow_path) {
2368     __ b(*stub->entry());
2369     __ bind(*stub->continuation());
2370     return;
2371   }
2372 
2373   // if we don't know anything, just go through the generic arraycopy
2374   if (default_type == nullptr // || basic_type == T_OBJECT
2375       ) {
2376     Label done;
2377     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2378 
2379     // Save the arguments in case the generic arraycopy fails and we
2380     // have to fall back to the JNI stub
2381     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2382     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2383     __ str(src,              Address(sp, 4*BytesPerWord));
2384 
2385     address copyfunc_addr = StubRoutines::generic_arraycopy();
2386     assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2387 
2388     // The arguments are in java calling convention so we shift them
2389     // to C convention
2390     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2391     __ mov(c_rarg0, j_rarg0);
2392     assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);

2406     __ cbz(r0, *stub->continuation());
2407 
2408     // Reload values from the stack so they are where the stub
2409     // expects them.
2410     __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2411     __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2412     __ ldr(src,              Address(sp, 4*BytesPerWord));
2413 
 2414     // r0 is -1^K (i.e. ~K) where K == partial copied count
 2415     __ eonw(rscratch1, r0, zr);   // rscratch1 = ~r0 = K
 2416     // adjust length down and src/dst pos up by the partial copied count
2417     __ subw(length, length, rscratch1);
2418     __ addw(src_pos, src_pos, rscratch1);
2419     __ addw(dst_pos, dst_pos, rscratch1);
2420     __ b(*stub->entry());
2421 
2422     __ bind(*stub->continuation());
2423     return;
2424   }
2425 
2426   // Handle inline type arrays
2427   if (flags & LIR_OpArrayCopy::src_inlinetype_check) {
2428     arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
2429   }
2430   if (flags & LIR_OpArrayCopy::dst_inlinetype_check) {
2431     arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
2432   }
2433 
2434   assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2435 
2436   int elem_size = type2aelembytes(basic_type);
2437   int scale = exact_log2(elem_size);
2438 
2439   Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2440   Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2441 
2442   // test for null
2443   if (flags & LIR_OpArrayCopy::src_null_check) {
2444     __ cbz(src, *stub->entry());
2445   }
2446   if (flags & LIR_OpArrayCopy::dst_null_check) {
2447     __ cbz(dst, *stub->entry());
2448   }
2449 
 2450   // If the compiler was not able to prove that the exact type of the source or the destination
 2451   // of the arraycopy is an array type, check at runtime whether the source or the destination is
 2452   // an instance type.
2453   if (flags & LIR_OpArrayCopy::type_check) {

2962         __ verify_klass_ptr(tmp);
2963 #endif
2964       } else {
2965         assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2966                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2967 
2968         __ ldr(tmp, mdo_addr);
2969         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2970 
2971         __ orr(tmp, tmp, TypeEntries::type_unknown);
2972         __ str(tmp, mdo_addr);
2973         // FIXME: Write barrier needed here?
2974       }
2975     }
2976 
2977     __ bind(next);
2978   }
2979   COMMENT("} emit_profile_type");
2980 }
2981 
2982 void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
2983   Register obj = op->obj()->as_register();
2984   Register tmp = op->tmp()->as_pointer_register();
2985   bool not_null = op->not_null();
2986   int flag = op->flag();
2987 
2988   Label not_inline_type;
2989   if (!not_null) {
2990     __ cbz(obj, not_inline_type);
2991   }
2992 
2993   __ test_oop_is_not_inline_type(obj, tmp, not_inline_type);
2994 
2995   Address mdo_addr = as_Address(op->mdp()->as_address_ptr(), rscratch2);
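  // Read-modify-write of the MDO flag byte; profiling updates may race,
  // which is acceptable for this best-effort data.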
2996   __ ldrb(rscratch1, mdo_addr);
2997   __ orr(rscratch1, rscratch1, flag);
2998   __ strb(rscratch1, mdo_addr);
2999 
3000   __ bind(not_inline_type);
3001 }
3002 
3003 void LIR_Assembler::align_backward_branch_target() {
3004 }
3005 
3006 
3007 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
3008   // tmp must be unused
3009   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
3010 
3011   if (left->is_single_cpu()) {
3012     assert(dest->is_single_cpu(), "expect single result reg");
3013     __ negw(dest->as_register(), left->as_register());
3014   } else if (left->is_double_cpu()) {
3015     assert(dest->is_double_cpu(), "expect double result reg");
3016     __ neg(dest->as_register_lo(), left->as_register_lo());
3017   } else if (left->is_single_fpu()) {
3018     assert(dest->is_single_fpu(), "expect single float result reg");
3019     __ fnegs(dest->as_float_reg(), left->as_float_reg());
3020   } else {
3021     assert(left->is_double_fpu(), "expect double float operand reg");

3121 void LIR_Assembler::membar_loadload() {
3122   __ membar(Assembler::LoadLoad);
3123 }
3124 
3125 void LIR_Assembler::membar_storestore() {
3126   __ membar(MacroAssembler::StoreStore);
3127 }
3128 
3129 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
3130 
3131 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
3132 
3133 void LIR_Assembler::on_spin_wait() {
3134   __ spin_wait();
3135 }
3136 
3137 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
3138   __ mov(result_reg->as_register(), rthread);
3139 }
3140 
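// Sets the flags by comparing the saved orig_pc slot against null; callers
// branch on the result, e.g. to detect a frame that has been deoptimized.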
3141 void LIR_Assembler::check_orig_pc() {
3142   __ ldr(rscratch2, frame_map()->address_for_orig_pc_addr());
3143   __ cmp(rscratch2, (u1)NULL_WORD);
3144 }
3145 
3146 void LIR_Assembler::peephole(LIR_List *lir) {
3147 #if 0
3148   if (tableswitch_count >= max_tableswitches)
3149     return;
3150 
3151   /*
3152     This finite-state automaton recognizes sequences of compare-and-
3153     branch instructions.  We will turn them into a tableswitch.  You
3154     could argue that C1 really shouldn't be doing this sort of
3155     optimization, but without it the code is really horrible.
3156   */
3157 
3158   enum { start_s, cmp1_s, beq_s, cmp_s } state;
3159   int first_key, last_key = -2147483648;
3160   int next_key = 0;
3161   int start_insn = -1;
3162   int last_insn = -1;
3163   Register reg = noreg;
3164   LIR_Opr reg_opr;