
src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp


  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "asm/assembler.hpp"
  28 #include "c1/c1_CodeStubs.hpp"
  29 #include "c1/c1_Compilation.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_MacroAssembler.hpp"
  32 #include "c1/c1_Runtime1.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "ci/ciArrayKlass.hpp"

  35 #include "ci/ciInstance.hpp"
  36 #include "code/compiledIC.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "gc/shared/gc_globals.hpp"
  39 #include "nativeInst_aarch64.hpp"
  40 #include "oops/objArrayKlass.hpp"

  41 #include "runtime/frame.inline.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "runtime/stubRoutines.hpp"
  44 #include "utilities/powerOfTwo.hpp"
  45 #include "vmreg_aarch64.inline.hpp"
  46 
  47 
  48 #ifndef PRODUCT
  49 #define COMMENT(x)   do { __ block_comment(x); } while (0)
  50 #else
  51 #define COMMENT(x)
  52 #endif
  53 
  54 NEEDS_CLEANUP // remove these definitions?
  55 const Register SYNC_header = r0;   // synchronization header
  56 const Register SHIFT_count = r0;   // where count for shift operations must be
  57 
  58 #define __ _masm->
  59 
  60 

 413     if (LockingMode == LM_MONITOR) {
 414       __ b(*stub->entry());
 415     } else {
 416       __ unlock_object(r5, r4, r0, r6, *stub->entry());
 417     }
 418     __ bind(*stub->continuation());
 419   }
 420 
 421   if (compilation()->env()->dtrace_method_probes()) {
 422     __ mov(c_rarg0, rthread);
 423     __ mov_metadata(c_rarg1, method()->constant_encoding());
 424     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
 425   }
 426 
 427   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 428     __ mov(r0, r19);  // Restore the exception
 429   }
 430 
 431   // remove the activation and dispatch to the unwind handler
 432   __ block_comment("remove_frame and dispatch to the unwind handler");
 433   __ remove_frame(initial_frame_size_in_bytes());
 434   __ far_jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id)));
 435 
 436   // Emit the slow path assembly
 437   if (stub != nullptr) {
 438     stub->emit_code(this);
 439   }
 440 
 441   return offset;
 442 }
 443 
 444 
 445 int LIR_Assembler::emit_deopt_handler() {
 446   // generate code for the deopt handler
 447   address handler_base = __ start_a_stub(deopt_handler_size());
 448   if (handler_base == nullptr) {
 449     // not enough space left for the handler
 450     bailout("deopt handler overflow");
 451     return -1;
 452   }
 453 

 457   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 458   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 459   __ end_a_stub();
 460 
 461   return offset;
 462 }
 463 
 464 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
 465   _masm->code_section()->relocate(adr, relocInfo::poll_type);
 466   int pc_offset = code_offset();
 467   flush_debug_info(pc_offset);
 468   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
 469   if (info->exception_handlers() != nullptr) {
 470     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
 471   }
 472 }
 473 
 474 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 475   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0");
 476 
 477   // Pop the stack before the safepoint code
 478   __ remove_frame(initial_frame_size_in_bytes());
 479 
 480   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 481     __ reserved_stack_check();
 482   }
 483 
 484   code_stub->set_safepoint_offset(__ offset());
 485   __ relocate(relocInfo::poll_return_type);
 486   __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
 487   __ ret(lr);
 488 }
 489 
 490 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 491   guarantee(info != nullptr, "Shouldn't be null");
 492   __ get_polling_page(rscratch1, relocInfo::poll_type);
 493   add_debug_info_for_branch(info);  // This isn't just debug info:
 494                                     // it's the oop map
 495   __ read_polling_page(rscratch1, relocInfo::poll_type);
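       // The load above faults when the polling page is armed for a safepoint;
       // the oop map recorded by add_debug_info_for_branch describes this frame at the poll.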
 496   return __ offset();
 497 }
 498 
 499 
 500 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
 501   if (from_reg == r31_sp)
 502     from_reg = sp;
 503   if (to_reg == r31_sp)
 504     to_reg = sp;
 505   __ mov(to_reg, from_reg);
 506 }
 507 
 508 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
 509 

 516   switch (c->type()) {
 517     case T_INT: {
 518       assert(patch_code == lir_patch_none, "no patching handled here");
 519       __ movw(dest->as_register(), c->as_jint());
 520       break;
 521     }
 522 
 523     case T_ADDRESS: {
 524       assert(patch_code == lir_patch_none, "no patching handled here");
 525       __ mov(dest->as_register(), c->as_jint());
 526       break;
 527     }
 528 
 529     case T_LONG: {
 530       assert(patch_code == lir_patch_none, "no patching handled here");
 531       __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
 532       break;
 533     }
 534 
 535     case T_OBJECT: {
 536         if (patch_code == lir_patch_none) {
 537           jobject2reg(c->as_jobject(), dest->as_register());
 538         } else {
 539           jobject2reg_with_patching(dest->as_register(), info);
 540         }
 541       break;
 542     }
 543 
 544     case T_METADATA: {
 545       if (patch_code != lir_patch_none) {
 546         klass2reg_with_patching(dest->as_register(), info);
 547       } else {
 548         __ mov_metadata(dest->as_register(), c->as_metadata());
 549       }
 550       break;
 551     }
 552 
 553     case T_FLOAT: {
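           // Use the fmov immediate form when the constant fits the 8-bit FP immediate
           // encoding; otherwise materialize it from the constant area.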
 554       if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
 555         __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
 556       } else {
 557         __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
 558         __ ldrs(dest->as_float_reg(), Address(rscratch1));
 559       }

 629   LIR_Const* c = src->as_constant_ptr();
 630   LIR_Address* to_addr = dest->as_address_ptr();
 631 
 632   void (Assembler::* insn)(Register Rt, const Address &adr);
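       // Only zero/null constants are expected here (see the asserts below); the selected
       // instruction is later used to store zr with the width matching the element type.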
 633 
 634   switch (type) {
 635   case T_ADDRESS:
 636     assert(c->as_jint() == 0, "should be");
 637     insn = &Assembler::str;
 638     break;
 639   case T_LONG:
 640     assert(c->as_jlong() == 0, "should be");
 641     insn = &Assembler::str;
 642     break;
 643   case T_INT:
 644     assert(c->as_jint() == 0, "should be");
 645     insn = &Assembler::strw;
 646     break;
 647   case T_OBJECT:
 648   case T_ARRAY:
 649     assert(c->as_jobject() == nullptr, "should be");
 650     if (UseCompressedOops && !wide) {
 651       insn = &Assembler::strw;
 652     } else {
 653       insn = &Assembler::str;
 654     }
 655     break;
 656   case T_CHAR:
 657   case T_SHORT:
 658     assert(c->as_jint() == 0, "should be");
 659     insn = &Assembler::strh;
 660     break;
 661   case T_BOOLEAN:
 662   case T_BYTE:
 663     assert(c->as_jint() == 0, "should be");
 664     insn = &Assembler::strb;
 665     break;
 666   default:
 667     ShouldNotReachHere();
 668     insn = &Assembler::str;  // unreachable

 976     case T_CHAR:
 977       __ ldrh(dest->as_register(), as_Address(from_addr));
 978       break;
 979     case T_SHORT:
 980       __ ldrsh(dest->as_register(), as_Address(from_addr));
 981       break;
 982 
 983     default:
 984       ShouldNotReachHere();
 985   }
 986 
 987   if (is_reference_type(type)) {
 988     if (UseCompressedOops && !wide) {
 989       __ decode_heap_oop(dest->as_register());
 990     }
 991 
 992     __ verify_oop(dest->as_register());
 993   }
 994 }
 995 
 996 
 997 int LIR_Assembler::array_element_size(BasicType type) const {
 998   int elem_size = type2aelembytes(type);
 999   return exact_log2(elem_size);
1000 }
1001 
1002 
1003 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1004   switch (op->code()) {
1005   case lir_idiv:
1006   case lir_irem:
1007     arithmetic_idiv(op->code(),
1008                     op->in_opr1(),
1009                     op->in_opr2(),
1010                     op->in_opr3(),
1011                     op->result_opr(),
1012                     op->info());
1013     break;
1014   case lir_fmad:
1015     __ fmaddd(op->result_opr()->as_double_reg(),

1167     __ lea(rscratch1, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
1168     __ ldarb(rscratch1, rscratch1);
1169     __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1170     add_debug_info_for_null_check_here(op->stub()->info());
1171     __ br(Assembler::NE, *op->stub()->entry());
1172   }
1173   __ allocate_object(op->obj()->as_register(),
1174                      op->tmp1()->as_register(),
1175                      op->tmp2()->as_register(),
1176                      op->header_size(),
1177                      op->object_size(),
1178                      op->klass()->as_register(),
1179                      *op->stub()->entry());
1180   __ bind(*op->stub()->continuation());
1181 }
1182 
1183 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1184   Register len =  op->len()->as_register();
1185   __ uxtw(len, len);
1186 
1187   if (UseSlowPath ||
1188       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1189       (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
1190     __ b(*op->stub()->entry());
1191   } else {
1192     Register tmp1 = op->tmp1()->as_register();
1193     Register tmp2 = op->tmp2()->as_register();
1194     Register tmp3 = op->tmp3()->as_register();
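         // Keep the temps passed to allocate_array distinct from len: if len aliases
         // tmp1 or tmp2, substitute tmp3 for that temp; otherwise stash a copy of len in tmp3.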
1195     if (len == tmp1) {
1196       tmp1 = tmp3;
1197     } else if (len == tmp2) {
1198       tmp2 = tmp3;
1199     } else if (len == tmp3) {
1200       // everything is ok
1201     } else {
1202       __ mov(tmp3, len);
1203     }
1204     __ allocate_array(op->obj()->as_register(),
1205                       len,
1206                       tmp1,
1207                       tmp2,

1279     assert(data != nullptr,                "need data for type check");
1280     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1281   }
1282   Label* success_target = success;
1283   Label* failure_target = failure;
1284 
1285   if (obj == k_RInfo) {
1286     k_RInfo = dst;
1287   } else if (obj == klass_RInfo) {
1288     klass_RInfo = dst;
1289   }
1290   if (k->is_loaded() && !UseCompressedClassPointers) {
1291     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1292   } else {
1293     Rtmp1 = op->tmp3()->as_register();
1294     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1295   }
1296 
1297   assert_different_registers(obj, k_RInfo, klass_RInfo);
1298 
1299   if (should_profile) {
1300     Register mdo  = klass_RInfo;
1301     __ mov_metadata(mdo, md->constant_encoding());
1302     Label not_null;
1303     __ cbnz(obj, not_null);
1304     // Object is null; update MDO and exit
1305     Address data_addr
1306       = __ form_address(rscratch2, mdo,
1307                         md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1308                         0);
1309     __ ldrb(rscratch1, data_addr);
1310     __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1311     __ strb(rscratch1, data_addr);
1312     __ b(*obj_is_null);
1313     __ bind(not_null);
1314 
1315     Label update_done;
1316     Register recv = k_RInfo;
1317     __ load_klass(recv, obj);
1318     type_profile_helper(mdo, md, data, recv, &update_done);
1319     Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1320     __ addptr(counter_addr, DataLayout::counter_increment);
1321 
1322     __ bind(update_done);
1323   } else {
1324     __ cbz(obj, *obj_is_null);
1325   }
1326 
1327   if (!k->is_loaded()) {
1328     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1329   } else {
1330     __ mov_metadata(k_RInfo, k->constant_encoding());
1331   }
1332   __ verify_oop(obj);
1333 
1334   if (op->fast_check()) {
1335     // get object class
1336     // not a safepoint as obj null check happens earlier
1337     __ load_klass(rscratch1, obj);
 1338     __ cmp(rscratch1, k_RInfo);
1339 
1340     __ br(Assembler::NE, *failure_target);
1341     // successful cast, fall through to profile or jump
1342   } else {
1343     // get object class
1344     // not a safepoint as obj null check happens earlier

1462     __ bind(success);
1463     if (dst != obj) {
1464       __ mov(dst, obj);
1465     }
1466   } else if (code == lir_instanceof) {
1467     Register obj = op->object()->as_register();
1468     Register dst = op->result_opr()->as_register();
1469     Label success, failure, done;
1470     emit_typecheck_helper(op, &success, &failure, &failure);
1471     __ bind(failure);
1472     __ mov(dst, zr);
1473     __ b(done);
1474     __ bind(success);
1475     __ mov(dst, 1);
1476     __ bind(done);
1477   } else {
1478     ShouldNotReachHere();
1479   }
1480 }
1481 
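     // 32-bit (word) and 64-bit (xword) compare-and-swap helpers: cmpxchg sets the condition
     // flags (EQ on success), cset records the result in rscratch1 (0 on success, 1 on failure),
     // and the trailing AnyAny barrier provides a full fence after the exchange.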
1482 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1483   __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1484   __ cset(rscratch1, Assembler::NE);
1485   __ membar(__ AnyAny);
1486 }
1487 
1488 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1489   __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1490   __ cset(rscratch1, Assembler::NE);
1491   __ membar(__ AnyAny);
1492 }
1493 
1494 
1495 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1496   Register addr;
1497   if (op->addr()->is_register()) {
1498     addr = as_reg(op->addr());
1499   } else {
1500     assert(op->addr()->is_address(), "what else?");
1501     LIR_Address* addr_ptr = op->addr()->as_address_ptr();

1975     __ cmp(left->as_register_lo(), right->as_register_lo());
1976     __ mov(dst->as_register(), (uint64_t)-1L);
1977     __ br(Assembler::LT, done);
1978     __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
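         // csinc with zr, zr yields 0 when EQ and 1 otherwise, so dst ends up as
         // -1 / 0 / 1 for less-than / equal / greater-than.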
1979     __ bind(done);
1980   } else {
1981     ShouldNotReachHere();
1982   }
1983 }
1984 
1985 
1986 void LIR_Assembler::align_call(LIR_Code code) {  }
1987 
1988 
1989 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
1990   address call = __ trampoline_call(Address(op->addr(), rtype));
1991   if (call == nullptr) {
1992     bailout("trampoline stub overflow");
1993     return;
1994   }
1995   add_call_info(code_offset(), op->info());
1996   __ post_call_nop();
1997 }
1998 
1999 
2000 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2001   address call = __ ic_call(op->addr());
2002   if (call == nullptr) {
2003     bailout("trampoline stub overflow");
2004     return;
2005   }
2006   add_call_info(code_offset(), op->info());
2007   __ post_call_nop();
2008 }
2009 
2010 void LIR_Assembler::emit_static_call_stub() {
2011   address call_pc = __ pc();
2012   address stub = __ start_a_stub(call_stub_size());
2013   if (stub == nullptr) {
2014     bailout("static call stub overflow");
2015     return;
2016   }
2017 
2018   int start = __ offset();
2019 
2020   __ relocate(static_stub_Relocation::spec(call_pc));
2021   __ emit_static_call_stub();
2022 
2023   assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
2024         <= call_stub_size(), "stub too big");
2025   __ end_a_stub();
2026 }

2149 
2150 
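     // store_parameter() writes an outgoing stub argument into the reserved argument
     // area at the bottom of the frame (the offset is given in words from sp).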
2151 void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2152   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2153   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2154   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2155   __ mov (rscratch1, c);
2156   __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2157 }
2158 
2159 
2160 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
2161   ShouldNotReachHere();
2162   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2163   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2164   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2165   __ lea(rscratch1, __ constant_oop_address(o));
2166   __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2167 }
2168 
2169 
2170 // This code replaces a call to arraycopy; no exceptions may be
2171 // thrown in this code: they must be thrown in the System.arraycopy
2172 // activation frame. We could save some checks if that were not the case.
2173 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2174   ciArrayKlass* default_type = op->expected_type();
2175   Register src = op->src()->as_register();
2176   Register dst = op->dst()->as_register();
2177   Register src_pos = op->src_pos()->as_register();
2178   Register dst_pos = op->dst_pos()->as_register();
2179   Register length  = op->length()->as_register();
2180   Register tmp = op->tmp()->as_register();
2181 
2182   CodeStub* stub = op->stub();
2183   int flags = op->flags();
2184   BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
2185   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2186 
2187   // if we don't know anything, just go through the generic arraycopy
2188   if (default_type == nullptr // || basic_type == T_OBJECT
2189       ) {
2190     Label done;
2191     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2192 
2193     // Save the arguments in case the generic arraycopy fails and we
2194     // have to fall back to the JNI stub
2195     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2196     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2197     __ str(src,              Address(sp, 4*BytesPerWord));
2198 
2199     address copyfunc_addr = StubRoutines::generic_arraycopy();
2200     assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2201 
2202     // The arguments are in the Java calling convention, so we shift them
2203     // to the C convention
2204     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2205     __ mov(c_rarg0, j_rarg0);
2206     assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);

2220     __ cbz(r0, *stub->continuation());
2221 
2222     // Reload values from the stack so they are where the stub
2223     // expects them.
2224     __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2225     __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2226     __ ldr(src,              Address(sp, 4*BytesPerWord));
2227 
2228     // r0 is -1^K where K == number of elements already copied
2229     __ eonw(rscratch1, r0, zr);
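         // eonw(rscratch1, r0, zr) computes ~r0, recovering K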
2230     // adjust length down and src/dst pos up by the number of elements already copied
2231     __ subw(length, length, rscratch1);
2232     __ addw(src_pos, src_pos, rscratch1);
2233     __ addw(dst_pos, dst_pos, rscratch1);
2234     __ b(*stub->entry());
2235 
2236     __ bind(*stub->continuation());
2237     return;
2238   }
2239 
2240   assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2241 
2242   int elem_size = type2aelembytes(basic_type);
2243   int scale = exact_log2(elem_size);
2244 
2245   Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2246   Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2247 
2248   // test for null
2249   if (flags & LIR_OpArrayCopy::src_null_check) {
2250     __ cbz(src, *stub->entry());
2251   }
2252   if (flags & LIR_OpArrayCopy::dst_null_check) {
2253     __ cbz(dst, *stub->entry());
2254   }
2255 
2256   // If the compiler was not able to prove that the exact type of the source or the destination
2257   // of the arraycopy is an array type, check at runtime if the source or the destination is
2258   // an instance type.
2259   if (flags & LIR_OpArrayCopy::type_check) {

2774         __ verify_klass_ptr(tmp);
2775 #endif
2776       } else {
2777         assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2778                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2779 
2780         __ ldr(tmp, mdo_addr);
2781         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2782 
2783         __ orr(tmp, tmp, TypeEntries::type_unknown);
2784         __ str(tmp, mdo_addr);
2785         // FIXME: Write barrier needed here?
2786       }
2787     }
2788 
2789     __ bind(next);
2790   }
2791   COMMENT("} emit_profile_type");
2792 }
2793 
2794 
2795 void LIR_Assembler::align_backward_branch_target() {
2796 }
2797 
2798 
2799 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2800   // tmp must be unused
2801   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2802 
2803   if (left->is_single_cpu()) {
2804     assert(dest->is_single_cpu(), "expect single result reg");
2805     __ negw(dest->as_register(), left->as_register());
2806   } else if (left->is_double_cpu()) {
2807     assert(dest->is_double_cpu(), "expect double result reg");
2808     __ neg(dest->as_register_lo(), left->as_register_lo());
2809   } else if (left->is_single_fpu()) {
2810     assert(dest->is_single_fpu(), "expect single float result reg");
2811     __ fnegs(dest->as_float_reg(), left->as_float_reg());
2812   } else {
2813     assert(left->is_double_fpu(), "expect double float operand reg");

2913 void LIR_Assembler::membar_loadload() {
2914   __ membar(Assembler::LoadLoad);
2915 }
2916 
2917 void LIR_Assembler::membar_storestore() {
2918   __ membar(MacroAssembler::StoreStore);
2919 }
2920 
2921 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
2922 
2923 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
2924 
2925 void LIR_Assembler::on_spin_wait() {
2926   __ spin_wait();
2927 }
2928 
2929 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
2930   __ mov(result_reg->as_register(), rthread);
2931 }
2932 
2933 
2934 void LIR_Assembler::peephole(LIR_List *lir) {
2935 #if 0
2936   if (tableswitch_count >= max_tableswitches)
2937     return;
2938 
2939   /*
2940     This finite-state automaton recognizes sequences of compare-and-
2941     branch instructions.  We will turn them into a tableswitch.  You
2942     could argue that C1 really shouldn't be doing this sort of
2943     optimization, but without it the code is really horrible.
2944   */
2945 
2946   enum { start_s, cmp1_s, beq_s, cmp_s } state;
2947   int first_key, last_key = -2147483648;
2948   int next_key = 0;
2949   int start_insn = -1;
2950   int last_insn = -1;
2951   Register reg = noreg;
2952   LIR_Opr reg_opr;

  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "asm/assembler.hpp"
  28 #include "c1/c1_CodeStubs.hpp"
  29 #include "c1/c1_Compilation.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_MacroAssembler.hpp"
  32 #include "c1/c1_Runtime1.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "ci/ciArrayKlass.hpp"
  35 #include "ci/ciInlineKlass.hpp"
  36 #include "ci/ciInstance.hpp"
  37 #include "code/compiledIC.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "gc/shared/gc_globals.hpp"
  40 #include "nativeInst_aarch64.hpp"
  41 #include "oops/objArrayKlass.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "runtime/frame.inline.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "runtime/stubRoutines.hpp"
  46 #include "utilities/powerOfTwo.hpp"
  47 #include "vmreg_aarch64.inline.hpp"
  48 
  49 
  50 #ifndef PRODUCT
  51 #define COMMENT(x)   do { __ block_comment(x); } while (0)
  52 #else
  53 #define COMMENT(x)
  54 #endif
  55 
  56 NEEDS_CLEANUP // remove these definitions?
  57 const Register SYNC_header = r0;   // synchronization header
  58 const Register SHIFT_count = r0;   // where count for shift operations must be
  59 
  60 #define __ _masm->
  61 
  62 

 415     if (LockingMode == LM_MONITOR) {
 416       __ b(*stub->entry());
 417     } else {
 418       __ unlock_object(r5, r4, r0, r6, *stub->entry());
 419     }
 420     __ bind(*stub->continuation());
 421   }
 422 
 423   if (compilation()->env()->dtrace_method_probes()) {
 424     __ mov(c_rarg0, rthread);
 425     __ mov_metadata(c_rarg1, method()->constant_encoding());
 426     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
 427   }
 428 
 429   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 430     __ mov(r0, r19);  // Restore the exception
 431   }
 432 
 433   // remove the activation and dispatch to the unwind handler
 434   __ block_comment("remove_frame and dispatch to the unwind handler");
 435   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 436   __ far_jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id)));
 437 
 438   // Emit the slow path assembly
 439   if (stub != nullptr) {
 440     stub->emit_code(this);
 441   }
 442 
 443   return offset;
 444 }
 445 
 446 
 447 int LIR_Assembler::emit_deopt_handler() {
 448   // generate code for the deopt handler
 449   address handler_base = __ start_a_stub(deopt_handler_size());
 450   if (handler_base == nullptr) {
 451     // not enough space left for the handler
 452     bailout("deopt handler overflow");
 453     return -1;
 454   }
 455 

 459   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 460   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 461   __ end_a_stub();
 462 
 463   return offset;
 464 }
 465 
 466 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
 467   _masm->code_section()->relocate(adr, relocInfo::poll_type);
 468   int pc_offset = code_offset();
 469   flush_debug_info(pc_offset);
 470   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
 471   if (info->exception_handlers() != nullptr) {
 472     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
 473   }
 474 }
 475 
 476 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 477   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0");
 478 
 479   if (InlineTypeReturnedAsFields) {
 480     // Check if we are returning a non-null inline type and load its fields into registers
 481     ciType* return_type = compilation()->method()->return_type();
 482     if (return_type->is_inlinetype()) {
 483       ciInlineKlass* vk = return_type->as_inline_klass();
 484       if (vk->can_be_returned_as_fields()) {
 485         address unpack_handler = vk->unpack_handler();
 486         assert(unpack_handler != nullptr, "must be");
 487         __ far_call(RuntimeAddress(unpack_handler));
 488       }
 489     } else if (return_type->is_instance_klass() && (!return_type->is_loaded() || StressCallingConvention)) {
 490       Label skip;
 491       Label not_null;
 492       __ cbnz(r0, not_null);
 493       // Returned value is null; zero all return registers because they may belong to oop fields
 494       __ mov(j_rarg1, zr);
 495       __ mov(j_rarg2, zr);
 496       __ mov(j_rarg3, zr);
 497       __ mov(j_rarg4, zr);
 498       __ mov(j_rarg5, zr);
 499       __ mov(j_rarg6, zr);
 500       __ mov(j_rarg7, zr);
 501       __ b(skip);
 502       __ bind(not_null);
 503 
 504       // Check if we are returning a non-null inline type and load its fields into registers
 505       __ test_oop_is_not_inline_type(r0, rscratch2, skip, /* can_be_null= */ false);
 506 
 507       // Load fields from a buffered value with an inline class specific handler
 508       __ load_klass(rscratch1 /*dst*/, r0 /*src*/);
 509       __ ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 510       __ ldr(rscratch1, Address(rscratch1, InlineKlass::unpack_handler_offset()));
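           // rscratch1 now holds klass -> inline-klass fixed block -> unpack handler entry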
 511       // The unpack handler can be null if the inline type is not scalarizable in returns
 512       __ cbz(rscratch1, skip);
 513       __ blr(rscratch1);
 514 
 515       __ bind(skip);
 516     }
 517     // At this point, r0 points to the value object (for interpreter or C1 caller).
 518     // The fields of the object are copied into registers (for C2 caller).
 519   }
 520 
 521   // Pop the stack before the safepoint code
 522   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 523 
 524   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 525     __ reserved_stack_check();
 526   }
 527 
 528   code_stub->set_safepoint_offset(__ offset());
 529   __ relocate(relocInfo::poll_return_type);
 530   __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
 531   __ ret(lr);
 532 }
 533 
 534 int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
 535   return (__ store_inline_type_fields_to_buf(vk, false));
 536 }
 537 
 538 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 539   guarantee(info != nullptr, "Shouldn't be null");
 540   __ get_polling_page(rscratch1, relocInfo::poll_type);
 541   add_debug_info_for_branch(info);  // This isn't just debug info:
 542                                     // it's the oop map
 543   __ read_polling_page(rscratch1, relocInfo::poll_type);
 544   return __ offset();
 545 }
 546 
 547 
 548 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
 549   if (from_reg == r31_sp)
 550     from_reg = sp;
 551   if (to_reg == r31_sp)
 552     to_reg = sp;
 553   __ mov(to_reg, from_reg);
 554 }
 555 
 556 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
 557 

 564   switch (c->type()) {
 565     case T_INT: {
 566       assert(patch_code == lir_patch_none, "no patching handled here");
 567       __ movw(dest->as_register(), c->as_jint());
 568       break;
 569     }
 570 
 571     case T_ADDRESS: {
 572       assert(patch_code == lir_patch_none, "no patching handled here");
 573       __ mov(dest->as_register(), c->as_jint());
 574       break;
 575     }
 576 
 577     case T_LONG: {
 578       assert(patch_code == lir_patch_none, "no patching handled here");
 579       __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
 580       break;
 581     }
 582 
 583     case T_OBJECT: {
 584         if (patch_code != lir_patch_none) {
 585           jobject2reg_with_patching(dest->as_register(), info);
 586         } else {
 587           jobject2reg(c->as_jobject(), dest->as_register());
 588         }
 589       break;
 590     }
 591 
 592     case T_METADATA: {
 593       if (patch_code != lir_patch_none) {
 594         klass2reg_with_patching(dest->as_register(), info);
 595       } else {
 596         __ mov_metadata(dest->as_register(), c->as_metadata());
 597       }
 598       break;
 599     }
 600 
 601     case T_FLOAT: {
 602       if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
 603         __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
 604       } else {
 605         __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
 606         __ ldrs(dest->as_float_reg(), Address(rscratch1));
 607       }

 677   LIR_Const* c = src->as_constant_ptr();
 678   LIR_Address* to_addr = dest->as_address_ptr();
 679 
 680   void (Assembler::* insn)(Register Rt, const Address &adr);
 681 
 682   switch (type) {
 683   case T_ADDRESS:
 684     assert(c->as_jint() == 0, "should be");
 685     insn = &Assembler::str;
 686     break;
 687   case T_LONG:
 688     assert(c->as_jlong() == 0, "should be");
 689     insn = &Assembler::str;
 690     break;
 691   case T_INT:
 692     assert(c->as_jint() == 0, "should be");
 693     insn = &Assembler::strw;
 694     break;
 695   case T_OBJECT:
 696   case T_ARRAY:
 697     // The non-null case is not handled on aarch64 but is handled on x86
 698     // FIXME: do we need to add it here?
 699     assert(c->as_jobject() == nullptr, "should be");
 700     if (UseCompressedOops && !wide) {
 701       insn = &Assembler::strw;
 702     } else {
 703       insn = &Assembler::str;
 704     }
 705     break;
 706   case T_CHAR:
 707   case T_SHORT:
 708     assert(c->as_jint() == 0, "should be");
 709     insn = &Assembler::strh;
 710     break;
 711   case T_BOOLEAN:
 712   case T_BYTE:
 713     assert(c->as_jint() == 0, "should be");
 714     insn = &Assembler::strb;
 715     break;
 716   default:
 717     ShouldNotReachHere();
 718     insn = &Assembler::str;  // unreachable

1026     case T_CHAR:
1027       __ ldrh(dest->as_register(), as_Address(from_addr));
1028       break;
1029     case T_SHORT:
1030       __ ldrsh(dest->as_register(), as_Address(from_addr));
1031       break;
1032 
1033     default:
1034       ShouldNotReachHere();
1035   }
1036 
1037   if (is_reference_type(type)) {
1038     if (UseCompressedOops && !wide) {
1039       __ decode_heap_oop(dest->as_register());
1040     }
1041 
1042     __ verify_oop(dest->as_register());
1043   }
1044 }
1045 
1046 void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
1047   assert(dst->is_cpu_register(), "must be");
1048   assert(dst->type() == src->type(), "must be");
1049 
1050   if (src->is_cpu_register()) {
1051     reg2reg(src, dst);
1052   } else if (src->is_stack()) {
1053     stack2reg(src, dst, dst->type());
1054   } else if (src->is_constant()) {
1055     const2reg(src, dst, lir_patch_none, nullptr);
1056   } else {
1057     ShouldNotReachHere();
1058   }
1059 }
1060 
1061 int LIR_Assembler::array_element_size(BasicType type) const {
1062   int elem_size = type2aelembytes(type);
1063   return exact_log2(elem_size);
1064 }
1065 
1066 
1067 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1068   switch (op->code()) {
1069   case lir_idiv:
1070   case lir_irem:
1071     arithmetic_idiv(op->code(),
1072                     op->in_opr1(),
1073                     op->in_opr2(),
1074                     op->in_opr3(),
1075                     op->result_opr(),
1076                     op->info());
1077     break;
1078   case lir_fmad:
1079     __ fmaddd(op->result_opr()->as_double_reg(),

1231     __ lea(rscratch1, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
1232     __ ldarb(rscratch1, rscratch1);
1233     __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1234     add_debug_info_for_null_check_here(op->stub()->info());
1235     __ br(Assembler::NE, *op->stub()->entry());
1236   }
1237   __ allocate_object(op->obj()->as_register(),
1238                      op->tmp1()->as_register(),
1239                      op->tmp2()->as_register(),
1240                      op->header_size(),
1241                      op->object_size(),
1242                      op->klass()->as_register(),
1243                      *op->stub()->entry());
1244   __ bind(*op->stub()->continuation());
1245 }
1246 
1247 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1248   Register len =  op->len()->as_register();
1249   __ uxtw(len, len);
1250 
1251   if (UseSlowPath || op->is_null_free() ||
1252       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1253       (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
1254     __ b(*op->stub()->entry());
1255   } else {
1256     Register tmp1 = op->tmp1()->as_register();
1257     Register tmp2 = op->tmp2()->as_register();
1258     Register tmp3 = op->tmp3()->as_register();
1259     if (len == tmp1) {
1260       tmp1 = tmp3;
1261     } else if (len == tmp2) {
1262       tmp2 = tmp3;
1263     } else if (len == tmp3) {
1264       // everything is ok
1265     } else {
1266       __ mov(tmp3, len);
1267     }
1268     __ allocate_array(op->obj()->as_register(),
1269                       len,
1270                       tmp1,
1271                       tmp2,

1343     assert(data != nullptr,                "need data for type check");
1344     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1345   }
1346   Label* success_target = success;
1347   Label* failure_target = failure;
1348 
1349   if (obj == k_RInfo) {
1350     k_RInfo = dst;
1351   } else if (obj == klass_RInfo) {
1352     klass_RInfo = dst;
1353   }
1354   if (k->is_loaded() && !UseCompressedClassPointers) {
1355     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1356   } else {
1357     Rtmp1 = op->tmp3()->as_register();
1358     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1359   }
1360 
1361   assert_different_registers(obj, k_RInfo, klass_RInfo);
1362 
1363   if (op->need_null_check()) {
1364     if (should_profile) {
1365       Register mdo  = klass_RInfo;
1366       __ mov_metadata(mdo, md->constant_encoding());
1367       Label not_null;
1368       __ cbnz(obj, not_null);
1369       // Object is null; update MDO and exit
1370       Address data_addr
1371         = __ form_address(rscratch2, mdo,
1372                           md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1373                           0);
1374       __ ldrb(rscratch1, data_addr);
1375       __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1376       __ strb(rscratch1, data_addr);
1377       __ b(*obj_is_null);
1378       __ bind(not_null);
1379 
1380       Label update_done;
1381       Register recv = k_RInfo;
1382       __ load_klass(recv, obj);
1383       type_profile_helper(mdo, md, data, recv, &update_done);
1384       Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1385       __ addptr(counter_addr, DataLayout::counter_increment);
1386 
1387       __ bind(update_done);
1388     } else {
1389       __ cbz(obj, *obj_is_null);
1390     }
1391   }
1392 
1393   if (!k->is_loaded()) {
1394     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1395   } else {
1396     __ mov_metadata(k_RInfo, k->constant_encoding());
1397   }
1398   __ verify_oop(obj);
1399 
1400   if (op->fast_check()) {
1401     // get object class
1402     // not a safepoint as obj null check happens earlier
1403     __ load_klass(rscratch1, obj);
 1404     __ cmp(rscratch1, k_RInfo);
1405 
1406     __ br(Assembler::NE, *failure_target);
1407     // successful cast, fall through to profile or jump
1408   } else {
1409     // get object class
1410     // not a safepoint as obj null check happens earlier

1528     __ bind(success);
1529     if (dst != obj) {
1530       __ mov(dst, obj);
1531     }
1532   } else if (code == lir_instanceof) {
1533     Register obj = op->object()->as_register();
1534     Register dst = op->result_opr()->as_register();
1535     Label success, failure, done;
1536     emit_typecheck_helper(op, &success, &failure, &failure);
1537     __ bind(failure);
1538     __ mov(dst, zr);
1539     __ b(done);
1540     __ bind(success);
1541     __ mov(dst, 1);
1542     __ bind(done);
1543   } else {
1544     ShouldNotReachHere();
1545   }
1546 }
1547 
1548 void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
1549   // We are loading/storing from/to an array that *may* be a flat array (the
1550   // declared type is Object[], abstract[], interface[] or VT.ref[]).
1551   // If this array is a flat array, take the slow path.
1552   __ test_flat_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1553   if (!op->value()->is_illegal()) {
1554     // The array is not a flat array, but it might be null-free. If we are storing
1555     // a null into a null-free array, take the slow path (which will throw NPE).
1556     Label skip;
1557     __ cbnz(op->value()->as_register(), skip);
1558     __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1559     __ bind(skip);
1560   }
1561 }
1562 
1563 void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
1564   // We are storing into an array that *may* be null-free (the declared type is
1565   // Object[], abstract[], interface[] or VT.ref[]).
1566   Label test_mark_word;
1567   Register tmp = op->tmp()->as_register();
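       // If the mark word is unlocked it carries the array-property bits directly; otherwise
       // it may be displaced, so read the klass prototype header instead. The following
       // branch tests the flags left by the final tst.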
1568   __ ldr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
1569   __ tst(tmp, markWord::unlocked_value);
1570   __ br(Assembler::NE, test_mark_word);
1571   __ load_prototype_header(tmp, op->array()->as_register());
1572   __ bind(test_mark_word);
1573   __ tst(tmp, markWord::null_free_array_bit_in_place);
1574 }
1575 
1576 void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
1577   Label L_oops_equal;
1578   Label L_oops_not_equal;
1579   Label L_end;
1580 
1581   Register left  = op->left()->as_register();
1582   Register right = op->right()->as_register();
1583 
1584   __ cmp(left, right);
1585   __ br(Assembler::EQ, L_oops_equal);
1586 
1587   // (1) Null check -- if one of the operands is null, the other must not be null (because
 1588   //     the two references are not equal), so they are not substitutable.
1589   //     FIXME: do null check only if the operand is nullable
1590   {
1591     __ cbz(left, L_oops_not_equal);
1592     __ cbz(right, L_oops_not_equal);
1593   }
1594 
1595   ciKlass* left_klass = op->left_klass();
1596   ciKlass* right_klass = op->right_klass();
1597 
 1598   // (2) Inline type check -- if either of the operands is not an inline type,
 1599   //     they are not substitutable. We do this only if we are not sure that the
 1600   //     operands are inline types.
 1601   if ((left_klass == nullptr || right_klass == nullptr) ||  // The klass is still unloaded, or came from a Phi node.
1602       !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
1603     Register tmp1  = op->tmp1()->as_register();
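         // AND the inline_type_pattern with both mark words; the result still equals the
         // pattern only if both objects carry every inline-type mark bit.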
1604     __ mov(tmp1, markWord::inline_type_pattern);
1605     __ ldr(rscratch1, Address(left, oopDesc::mark_offset_in_bytes()));
1606     __ andr(tmp1, tmp1, rscratch1);
1607     __ ldr(rscratch1, Address(right, oopDesc::mark_offset_in_bytes()));
1608     __ andr(tmp1, tmp1, rscratch1);
1609     __ cmp(tmp1, (u1)markWord::inline_type_pattern);
1610     __ br(Assembler::NE, L_oops_not_equal);
1611   }
1612 
1613   // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
1614   if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) {
1615     // No need to load klass -- the operands are statically known to be the same inline klass.
1616     __ b(*op->stub()->entry());
1617   } else {
1618     Register left_klass_op = op->left_klass_op()->as_register();
1619     Register right_klass_op = op->right_klass_op()->as_register();
1620 
1621     if (UseCompressedClassPointers) {
1622       __ ldrw(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
1623       __ ldrw(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
1624       __ cmpw(left_klass_op, right_klass_op);
1625     } else {
1626       __ ldr(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
1627       __ ldr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
1628       __ cmp(left_klass_op, right_klass_op);
1629     }
1630 
1631     __ br(Assembler::EQ, *op->stub()->entry()); // same klass -> do slow check
1632     // fall through to L_oops_not_equal
1633   }
1634 
1635   __ bind(L_oops_not_equal);
1636   move(op->not_equal_result(), op->result_opr());
1637   __ b(L_end);
1638 
1639   __ bind(L_oops_equal);
1640   move(op->equal_result(), op->result_opr());
1641   __ b(L_end);
1642 
1643   // We've returned from the stub. R0 contains 0x0 IFF the two
1644   // operands are not substitutable. (Don't compare against 0x1 in case the
1645   // C compiler is naughty)
1646   __ bind(*op->stub()->continuation());
1647   __ cbz(r0, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
1648   move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
1649   // fall-through
1650   __ bind(L_end);
1651 }
1652 
1653 
1654 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1655   __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1656   __ cset(rscratch1, Assembler::NE);
1657   __ membar(__ AnyAny);
1658 }
1659 
1660 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1661   __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1662   __ cset(rscratch1, Assembler::NE);
1663   __ membar(__ AnyAny);
1664 }
1665 
1666 
1667 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1668   Register addr;
1669   if (op->addr()->is_register()) {
1670     addr = as_reg(op->addr());
1671   } else {
1672     assert(op->addr()->is_address(), "what else?");
1673     LIR_Address* addr_ptr = op->addr()->as_address_ptr();

2147     __ cmp(left->as_register_lo(), right->as_register_lo());
2148     __ mov(dst->as_register(), (uint64_t)-1L);
2149     __ br(Assembler::LT, done);
2150     __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
2151     __ bind(done);
2152   } else {
2153     ShouldNotReachHere();
2154   }
2155 }
2156 
2157 
2158 void LIR_Assembler::align_call(LIR_Code code) {  }
2159 
2160 
2161 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2162   address call = __ trampoline_call(Address(op->addr(), rtype));
2163   if (call == nullptr) {
2164     bailout("trampoline stub overflow");
2165     return;
2166   }
2167   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
2168   __ post_call_nop();
2169 }
2170 
2171 
2172 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2173   address call = __ ic_call(op->addr());
2174   if (call == nullptr) {
2175     bailout("trampoline stub overflow");
2176     return;
2177   }
2178   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
2179   __ post_call_nop();
2180 }
2181 
2182 void LIR_Assembler::emit_static_call_stub() {
2183   address call_pc = __ pc();
2184   address stub = __ start_a_stub(call_stub_size());
2185   if (stub == nullptr) {
2186     bailout("static call stub overflow");
2187     return;
2188   }
2189 
2190   int start = __ offset();
2191 
2192   __ relocate(static_stub_Relocation::spec(call_pc));
2193   __ emit_static_call_stub();
2194 
2195   assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
2196         <= call_stub_size(), "stub too big");
2197   __ end_a_stub();
2198 }

2321 
2322 
2323 void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2324   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2325   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2326   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2327   __ mov (rscratch1, c);
2328   __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2329 }
2330 
2331 
2332 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
2333   ShouldNotReachHere();
2334   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2335   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2336   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2337   __ lea(rscratch1, __ constant_oop_address(o));
2338   __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2339 }
2340 
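     // Inline-type array check for arraycopy: take the slow path if the array is flat, or,
     // for a destination, if it is null-free (storing a possibly-null element into a
     // null-free array needs the slow-path checks).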
2341 void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) {
2342   if (null_check) {
2343     __ cbz(obj, *slow_path->entry());
2344   }
2345   if (is_dest) {
2346     __ test_null_free_array_oop(obj, tmp, *slow_path->entry());
2347     // TODO 8350865 Flat no longer implies null-free, so we need to check for flat dest. Can we do better here?
2348     __ test_flat_array_oop(obj, tmp, *slow_path->entry());
2349   } else {
2350     __ test_flat_array_oop(obj, tmp, *slow_path->entry());
2351   }
2352 }
2353 
2354 // This code replaces a call to arraycopy; no exceptions may be
2355 // thrown in this code: they must be thrown in the System.arraycopy
2356 // activation frame. We could save some checks if that were not the case.
2357 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2358   ciArrayKlass* default_type = op->expected_type();
2359   Register src = op->src()->as_register();
2360   Register dst = op->dst()->as_register();
2361   Register src_pos = op->src_pos()->as_register();
2362   Register dst_pos = op->dst_pos()->as_register();
2363   Register length  = op->length()->as_register();
2364   Register tmp = op->tmp()->as_register();
2365 
2366   CodeStub* stub = op->stub();
2367   int flags = op->flags();
2368   BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
2369   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2370 
2371   if (flags & LIR_OpArrayCopy::always_slow_path) {
2372     __ b(*stub->entry());
2373     __ bind(*stub->continuation());
2374     return;
2375   }
2376 
2377   // if we don't know anything, just go through the generic arraycopy
2378   if (default_type == nullptr // || basic_type == T_OBJECT
2379       ) {
2380     Label done;
2381     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2382 
2383     // Save the arguments in case the generic arraycopy fails and we
2384     // have to fall back to the JNI stub
2385     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2386     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2387     __ str(src,              Address(sp, 4*BytesPerWord));
2388 
2389     address copyfunc_addr = StubRoutines::generic_arraycopy();
2390     assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2391 
2392     // The arguments are in the Java calling convention, so we shift them
2393     // to the C convention
2394     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2395     __ mov(c_rarg0, j_rarg0);
2396     assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);

2410     __ cbz(r0, *stub->continuation());
2411 
2412     // Reload values from the stack so they are where the stub
2413     // expects them.
2414     __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2415     __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2416     __ ldr(src,              Address(sp, 4*BytesPerWord));
2417 
2418     // r0 is -1^K where K == number of elements already copied
2419     __ eonw(rscratch1, r0, zr);
2420     // adjust length down and src/dst pos up by the number of elements already copied
2421     __ subw(length, length, rscratch1);
2422     __ addw(src_pos, src_pos, rscratch1);
2423     __ addw(dst_pos, dst_pos, rscratch1);
2424     __ b(*stub->entry());
2425 
2426     __ bind(*stub->continuation());
2427     return;
2428   }
2429 
2430   // Handle inline type arrays
2431   if (flags & LIR_OpArrayCopy::src_inlinetype_check) {
2432     arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
2433   }
2434   if (flags & LIR_OpArrayCopy::dst_inlinetype_check) {
2435     arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
2436   }
2437 
2438   assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2439 
2440   int elem_size = type2aelembytes(basic_type);
2441   int scale = exact_log2(elem_size);
2442 
2443   Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2444   Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2445 
2446   // test for null
2447   if (flags & LIR_OpArrayCopy::src_null_check) {
2448     __ cbz(src, *stub->entry());
2449   }
2450   if (flags & LIR_OpArrayCopy::dst_null_check) {
2451     __ cbz(dst, *stub->entry());
2452   }
2453 
2454   // If the compiler was not able to prove that the exact type of the source or the destination
2455   // of the arraycopy is an array type, check at runtime if the source or the destination is
2456   // an instance type.
2457   if (flags & LIR_OpArrayCopy::type_check) {

2972         __ verify_klass_ptr(tmp);
2973 #endif
2974       } else {
2975         assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2976                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2977 
2978         __ ldr(tmp, mdo_addr);
2979         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2980 
2981         __ orr(tmp, tmp, TypeEntries::type_unknown);
2982         __ str(tmp, mdo_addr);
2983         // FIXME: Write barrier needed here?
2984       }
2985     }
2986 
2987     __ bind(next);
2988   }
2989   COMMENT("} emit_profile_type");
2990 }
2991 
2992 void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
2993   Register obj = op->obj()->as_register();
2994   Register tmp = op->tmp()->as_pointer_register();
2995   bool not_null = op->not_null();
2996   int flag = op->flag();
2997 
2998   Label not_inline_type;
2999   if (!not_null) {
3000     __ cbz(obj, not_inline_type);
3001   }
3002 
3003   __ test_oop_is_not_inline_type(obj, tmp, not_inline_type);
3004 
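       // obj is a non-null inline type: set the profile flag bit in the MDO cell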
3005   Address mdo_addr = as_Address(op->mdp()->as_address_ptr(), rscratch2);
3006   __ ldrb(rscratch1, mdo_addr);
3007   __ orr(rscratch1, rscratch1, flag);
3008   __ strb(rscratch1, mdo_addr);
3009 
3010   __ bind(not_inline_type);
3011 }
3012 
3013 void LIR_Assembler::align_backward_branch_target() {
3014 }
3015 
3016 
3017 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
3018   // tmp must be unused
3019   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
3020 
3021   if (left->is_single_cpu()) {
3022     assert(dest->is_single_cpu(), "expect single result reg");
3023     __ negw(dest->as_register(), left->as_register());
3024   } else if (left->is_double_cpu()) {
3025     assert(dest->is_double_cpu(), "expect double result reg");
3026     __ neg(dest->as_register_lo(), left->as_register_lo());
3027   } else if (left->is_single_fpu()) {
3028     assert(dest->is_single_fpu(), "expect single float result reg");
3029     __ fnegs(dest->as_float_reg(), left->as_float_reg());
3030   } else {
3031     assert(left->is_double_fpu(), "expect double float operand reg");

3131 void LIR_Assembler::membar_loadload() {
3132   __ membar(Assembler::LoadLoad);
3133 }
3134 
3135 void LIR_Assembler::membar_storestore() {
3136   __ membar(MacroAssembler::StoreStore);
3137 }
3138 
3139 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
3140 
3141 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
3142 
3143 void LIR_Assembler::on_spin_wait() {
3144   __ spin_wait();
3145 }
3146 
3147 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
3148   __ mov(result_reg->as_register(), rthread);
3149 }
3150 
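     // Compares the saved original PC slot of the current frame against null; callers
     // branch on the resulting flags.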
3151 void LIR_Assembler::check_orig_pc() {
3152   __ ldr(rscratch2, frame_map()->address_for_orig_pc_addr());
3153   __ cmp(rscratch2, (u1)NULL_WORD);
3154 }
3155 
3156 void LIR_Assembler::peephole(LIR_List *lir) {
3157 #if 0
3158   if (tableswitch_count >= max_tableswitches)
3159     return;
3160 
3161   /*
3162     This finite-state automaton recognizes sequences of compare-and-
3163     branch instructions.  We will turn them into a tableswitch.  You
3164     could argue that C1 really shouldn't be doing this sort of
3165     optimization, but without it the code is really horrible.
3166   */
3167 
3168   enum { start_s, cmp1_s, beq_s, cmp_s } state;
3169   int first_key, last_key = -2147483648;
3170   int next_key = 0;
3171   int start_insn = -1;
3172   int last_insn = -1;
3173   Register reg = noreg;
3174   LIR_Opr reg_opr;