src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp

  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "asm/assembler.hpp"
  28 #include "c1/c1_CodeStubs.hpp"
  29 #include "c1/c1_Compilation.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_MacroAssembler.hpp"
  32 #include "c1/c1_Runtime1.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "ci/ciArrayKlass.hpp"

  35 #include "ci/ciInstance.hpp"
  36 #include "code/compiledIC.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "gc/shared/gc_globals.hpp"
  39 #include "nativeInst_aarch64.hpp"
  40 #include "oops/objArrayKlass.hpp"

  41 #include "runtime/frame.inline.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "runtime/stubRoutines.hpp"
  44 #include "utilities/powerOfTwo.hpp"
  45 #include "vmreg_aarch64.inline.hpp"
  46 
  47 
  48 #ifndef PRODUCT
  49 #define COMMENT(x)   do { __ block_comment(x); } while (0)
  50 #else
  51 #define COMMENT(x)
  52 #endif
  53 
  54 NEEDS_CLEANUP // remove these definitions?
  55 const Register SYNC_header = r0;   // synchronization header
  56 const Register SHIFT_count = r0;   // where count for shift operations must be
  57 
  58 #define __ _masm->
  59 
  60 

 413     if (LockingMode == LM_MONITOR) {
 414       __ b(*stub->entry());
 415     } else {
 416       __ unlock_object(r5, r4, r0, r6, *stub->entry());
 417     }
 418     __ bind(*stub->continuation());
 419   }
 420 
 421   if (compilation()->env()->dtrace_method_probes()) {
 422     __ mov(c_rarg0, rthread);
 423     __ mov_metadata(c_rarg1, method()->constant_encoding());
 424     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
 425   }
 426 
 427   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 428     __ mov(r0, r19);  // Restore the exception
 429   }
 430 
 431   // remove the activation and dispatch to the unwind handler
 432   __ block_comment("remove_frame and dispatch to the unwind handler");
 433   __ remove_frame(initial_frame_size_in_bytes());
 434   __ far_jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id)));
 435 
 436   // Emit the slow path assembly
 437   if (stub != nullptr) {
 438     stub->emit_code(this);
 439   }
 440 
 441   return offset;
 442 }
 443 
 444 
 445 int LIR_Assembler::emit_deopt_handler() {
 446   // generate code for deopt handler
 447   address handler_base = __ start_a_stub(deopt_handler_size());
 448   if (handler_base == nullptr) {
 449     // not enough space left for the handler
 450     bailout("deopt handler overflow");
 451     return -1;
 452   }
 453 

 457   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 458   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 459   __ end_a_stub();
 460 
 461   return offset;
 462 }
 463 
 464 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
 465   _masm->code_section()->relocate(adr, relocInfo::poll_type);
 466   int pc_offset = code_offset();
 467   flush_debug_info(pc_offset);
 468   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
 469   if (info->exception_handlers() != nullptr) {
 470     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
 471   }
 472 }
 473 
 474 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 475   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0");
 476 
 477   // Pop the stack before the safepoint code
 478   __ remove_frame(initial_frame_size_in_bytes());
 479 
 480   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 481     __ reserved_stack_check();
 482   }
 483 
 484   code_stub->set_safepoint_offset(__ offset());
 485   __ relocate(relocInfo::poll_return_type);
 486   __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
 487   __ ret(lr);
 488 }
 489 
 490 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 491   guarantee(info != nullptr, "Shouldn't be null");
 492   __ get_polling_page(rscratch1, relocInfo::poll_type);
 493   add_debug_info_for_branch(info);  // This isn't just debug info:
 494                                     // it's the oop map
 495   __ read_polling_page(rscratch1, relocInfo::poll_type);
 496   return __ offset();
 497 }
 498 
 499 
 500 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
 501   if (from_reg == r31_sp)
 502     from_reg = sp;
 503   if (to_reg == r31_sp)
 504     to_reg = sp;
 505   __ mov(to_reg, from_reg);
 506 }
 507 
 508 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
 509 

 516   switch (c->type()) {
 517     case T_INT: {
 518       assert(patch_code == lir_patch_none, "no patching handled here");
 519       __ movw(dest->as_register(), c->as_jint());
 520       break;
 521     }
 522 
 523     case T_ADDRESS: {
 524       assert(patch_code == lir_patch_none, "no patching handled here");
 525       __ mov(dest->as_register(), c->as_jint());
 526       break;
 527     }
 528 
 529     case T_LONG: {
 530       assert(patch_code == lir_patch_none, "no patching handled here");
 531       __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
 532       break;
 533     }
 534 
 535     case T_OBJECT: {
 536         if (patch_code == lir_patch_none) {
 537           jobject2reg(c->as_jobject(), dest->as_register());
 538         } else {
 539           jobject2reg_with_patching(dest->as_register(), info);
 540         }
 541       break;
 542     }
 543 
 544     case T_METADATA: {
 545       if (patch_code != lir_patch_none) {
 546         klass2reg_with_patching(dest->as_register(), info);
 547       } else {
 548         __ mov_metadata(dest->as_register(), c->as_metadata());
 549       }
 550       break;
 551     }
 552 
 553     case T_FLOAT: {
 554       if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
 555         __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
 556       } else {
 557         __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
 558         __ ldrs(dest->as_float_reg(), Address(rscratch1));
 559       }

 629   LIR_Const* c = src->as_constant_ptr();
 630   LIR_Address* to_addr = dest->as_address_ptr();
 631 
 632   void (Assembler::* insn)(Register Rt, const Address &adr);
 633 
 634   switch (type) {
 635   case T_ADDRESS:
 636     assert(c->as_jint() == 0, "should be");
 637     insn = &Assembler::str;
 638     break;
 639   case T_LONG:
 640     assert(c->as_jlong() == 0, "should be");
 641     insn = &Assembler::str;
 642     break;
 643   case T_INT:
 644     assert(c->as_jint() == 0, "should be");
 645     insn = &Assembler::strw;
 646     break;
 647   case T_OBJECT:
 648   case T_ARRAY:
 649     assert(c->as_jobject() == nullptr, "should be");
 650     if (UseCompressedOops && !wide) {
 651       insn = &Assembler::strw;
 652     } else {
 653       insn = &Assembler::str;
 654     }
 655     break;
 656   case T_CHAR:
 657   case T_SHORT:
 658     assert(c->as_jint() == 0, "should be");
 659     insn = &Assembler::strh;
 660     break;
 661   case T_BOOLEAN:
 662   case T_BYTE:
 663     assert(c->as_jint() == 0, "should be");
 664     insn = &Assembler::strb;
 665     break;
 666   default:
 667     ShouldNotReachHere();
 668     insn = &Assembler::str;  // unreachable

 976     case T_CHAR:
 977       __ ldrh(dest->as_register(), as_Address(from_addr));
 978       break;
 979     case T_SHORT:
 980       __ ldrsh(dest->as_register(), as_Address(from_addr));
 981       break;
 982 
 983     default:
 984       ShouldNotReachHere();
 985   }
 986 
 987   if (is_reference_type(type)) {
 988     if (UseCompressedOops && !wide) {
 989       __ decode_heap_oop(dest->as_register());
 990     }
 991 
 992     __ verify_oop(dest->as_register());
 993   }
 994 }
 995 
 996 
 997 int LIR_Assembler::array_element_size(BasicType type) const {
 998   int elem_size = type2aelembytes(type);
 999   return exact_log2(elem_size);
1000 }
1001 
1002 
1003 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1004   switch (op->code()) {
1005   case lir_idiv:
1006   case lir_irem:
1007     arithmetic_idiv(op->code(),
1008                     op->in_opr1(),
1009                     op->in_opr2(),
1010                     op->in_opr3(),
1011                     op->result_opr(),
1012                     op->info());
1013     break;
1014   case lir_fmad:
1015     __ fmaddd(op->result_opr()->as_double_reg(),

1167     __ lea(rscratch1, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
1168     __ ldarb(rscratch1, rscratch1);
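          // ldarb is a load-acquire, so later accesses cannot be reordered
          // ahead of this init-state read; a klass that is not yet fully
          // initialized takes the slow path below.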
1169     __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1170     add_debug_info_for_null_check_here(op->stub()->info());
1171     __ br(Assembler::NE, *op->stub()->entry());
1172   }
1173   __ allocate_object(op->obj()->as_register(),
1174                      op->tmp1()->as_register(),
1175                      op->tmp2()->as_register(),
1176                      op->header_size(),
1177                      op->object_size(),
1178                      op->klass()->as_register(),
1179                      *op->stub()->entry());
1180   __ bind(*op->stub()->continuation());
1181 }
1182 
1183 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1184   Register len =  op->len()->as_register();
1185   __ uxtw(len, len);
1186 
1187   if (UseSlowPath ||
1188       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1189       (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
1190     __ b(*op->stub()->entry());
1191   } else {
1192     Register tmp1 = op->tmp1()->as_register();
1193     Register tmp2 = op->tmp2()->as_register();
1194     Register tmp3 = op->tmp3()->as_register();
1195     if (len == tmp1) {
1196       tmp1 = tmp3;
1197     } else if (len == tmp2) {
1198       tmp2 = tmp3;
1199     } else if (len == tmp3) {
1200       // everything is ok
1201     } else {
1202       __ mov(tmp3, len);
1203     }
1204     __ allocate_array(op->obj()->as_register(),
1205                       len,
1206                       tmp1,
1207                       tmp2,

1279     assert(data != nullptr,                "need data for type check");
1280     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1281   }
1282   Label* success_target = success;
1283   Label* failure_target = failure;
1284 
1285   if (obj == k_RInfo) {
1286     k_RInfo = dst;
1287   } else if (obj == klass_RInfo) {
1288     klass_RInfo = dst;
1289   }
1290   if (k->is_loaded() && !UseCompressedClassPointers) {
1291     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1292   } else {
1293     Rtmp1 = op->tmp3()->as_register();
1294     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1295   }
1296 
1297   assert_different_registers(obj, k_RInfo, klass_RInfo);
1298 
1299   if (should_profile) {
1300     Register mdo  = klass_RInfo;
1301     __ mov_metadata(mdo, md->constant_encoding());
1302     Label not_null;
1303     __ cbnz(obj, not_null);
1304     // Object is null; update MDO and exit
1305     Address data_addr
1306       = __ form_address(rscratch2, mdo,
1307                         md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1308                         0);
1309     __ ldrb(rscratch1, data_addr);
1310     __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1311     __ strb(rscratch1, data_addr);
1312     __ b(*obj_is_null);
1313     __ bind(not_null);
1314 
1315     Label update_done;
1316     Register recv = k_RInfo;
1317     __ load_klass(recv, obj);
1318     type_profile_helper(mdo, md, data, recv, &update_done);
1319     Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1320     __ addptr(counter_addr, DataLayout::counter_increment);
1321 
1322     __ bind(update_done);
1323   } else {
1324     __ cbz(obj, *obj_is_null);
1325   }
1326 
1327   if (!k->is_loaded()) {
1328     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1329   } else {
1330     __ mov_metadata(k_RInfo, k->constant_encoding());
1331   }
1332   __ verify_oop(obj);
1333 
1334   if (op->fast_check()) {
1335     // get object class
1336     // not a safepoint as obj null check happens earlier
1337     __ load_klass(rscratch1, obj);
 1338     __ cmp(rscratch1, k_RInfo);
1339 
1340     __ br(Assembler::NE, *failure_target);
1341     // successful cast, fall through to profile or jump
1342   } else {
1343     // get object class
1344     // not a safepoint as obj null check happens earlier

1462     __ bind(success);
1463     if (dst != obj) {
1464       __ mov(dst, obj);
1465     }
1466   } else if (code == lir_instanceof) {
1467     Register obj = op->object()->as_register();
1468     Register dst = op->result_opr()->as_register();
1469     Label success, failure, done;
1470     emit_typecheck_helper(op, &success, &failure, &failure);
1471     __ bind(failure);
1472     __ mov(dst, zr);
1473     __ b(done);
1474     __ bind(success);
1475     __ mov(dst, 1);
1476     __ bind(done);
1477   } else {
1478     ShouldNotReachHere();
1479   }
1480 }
1481 
1482 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1483   __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1484   __ cset(rscratch1, Assembler::NE);
1485   __ membar(__ AnyAny);
1486 }
1487 
1488 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1489   __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1490   __ cset(rscratch1, Assembler::NE);
1491   __ membar(__ AnyAny);
1492 }
1493 
1494 
1495 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1496   Register addr;
1497   if (op->addr()->is_register()) {
1498     addr = as_reg(op->addr());
1499   } else {
1500     assert(op->addr()->is_address(), "what else?");
1501     LIR_Address* addr_ptr = op->addr()->as_address_ptr();

1978     __ cmp(left->as_register_lo(), right->as_register_lo());
1979     __ mov(dst->as_register(), (uint64_t)-1L);
1980     __ br(Assembler::LT, done);
1981     __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
1982     __ bind(done);
1983   } else {
1984     ShouldNotReachHere();
1985   }
1986 }
1987 
1988 
1989 void LIR_Assembler::align_call(LIR_Code code) {  }
1990 
1991 
1992 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
1993   address call = __ trampoline_call(Address(op->addr(), rtype));
1994   if (call == nullptr) {
1995     bailout("trampoline stub overflow");
1996     return;
1997   }
1998   add_call_info(code_offset(), op->info());
1999   __ post_call_nop();
2000 }
2001 
2002 
2003 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2004   address call = __ ic_call(op->addr());
2005   if (call == nullptr) {
2006     bailout("trampoline stub overflow");
2007     return;
2008   }
2009   add_call_info(code_offset(), op->info());
2010   __ post_call_nop();
2011 }
2012 
2013 void LIR_Assembler::emit_static_call_stub() {
2014   address call_pc = __ pc();
2015   address stub = __ start_a_stub(call_stub_size());
2016   if (stub == nullptr) {
2017     bailout("static call stub overflow");
2018     return;
2019   }
2020 
2021   int start = __ offset();
2022 
2023   __ relocate(static_stub_Relocation::spec(call_pc));
2024   __ emit_static_call_stub();
2025 
2026   assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
2027         <= call_stub_size(), "stub too big");
2028   __ end_a_stub();
2029 }

2152 
2153 
2154 void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2155   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2156   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2157   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2158   __ mov (rscratch1, c);
2159   __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2160 }
2161 
2162 
2163 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
2164   ShouldNotReachHere();
2165   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2166   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2167   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2168   __ lea(rscratch1, __ constant_oop_address(o));
2169   __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2170 }
2171 
2172 
 2173 // This code replaces a call to arraycopy; no exceptions may
 2174 // be thrown in this code; they must be thrown in the System.arraycopy
 2175 // activation frame. We could save some checks if this were not the case.
2176 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2177   ciArrayKlass* default_type = op->expected_type();
2178   Register src = op->src()->as_register();
2179   Register dst = op->dst()->as_register();
2180   Register src_pos = op->src_pos()->as_register();
2181   Register dst_pos = op->dst_pos()->as_register();
2182   Register length  = op->length()->as_register();
2183   Register tmp = op->tmp()->as_register();
2184 
2185   CodeStub* stub = op->stub();
2186   int flags = op->flags();
2187   BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
2188   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2189 
2190   // if we don't know anything, just go through the generic arraycopy
2191   if (default_type == nullptr // || basic_type == T_OBJECT
2192       ) {
2193     Label done;
2194     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2195 
2196     // Save the arguments in case the generic arraycopy fails and we
2197     // have to fall back to the JNI stub
2198     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2199     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2200     __ str(src,              Address(sp, 4*BytesPerWord));
2201 
2202     address copyfunc_addr = StubRoutines::generic_arraycopy();
2203     assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2204 
2205     // The arguments are in java calling convention so we shift them
2206     // to C convention
2207     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2208     __ mov(c_rarg0, j_rarg0);
2209     assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);

2223     __ cbz(r0, *stub->continuation());
2224 
2225     // Reload values from the stack so they are where the stub
2226     // expects them.
2227     __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2228     __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2229     __ ldr(src,              Address(sp, 4*BytesPerWord));
2230 
 2231     // r0 is -1^K (i.e. ~K) where K == the number of elements partially copied
2232     __ eonw(rscratch1, r0, zr);
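          // eonw with zr is bitwise NOT, so rscratch1 = ~r0 = K.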
 2233     // adjust length down and src/dst pos up by partial copied count
2234     __ subw(length, length, rscratch1);
2235     __ addw(src_pos, src_pos, rscratch1);
2236     __ addw(dst_pos, dst_pos, rscratch1);
2237     __ b(*stub->entry());
2238 
2239     __ bind(*stub->continuation());
2240     return;
2241   }
2242 
2243   assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2244 
2245   int elem_size = type2aelembytes(basic_type);
2246   int scale = exact_log2(elem_size);
2247 
2248   Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2249   Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2250 
2251   // test for null
2252   if (flags & LIR_OpArrayCopy::src_null_check) {
2253     __ cbz(src, *stub->entry());
2254   }
2255   if (flags & LIR_OpArrayCopy::dst_null_check) {
2256     __ cbz(dst, *stub->entry());
2257   }
2258 
2259   // If the compiler was not able to prove that exact type of the source or the destination
2260   // of the arraycopy is an array type, check at runtime if the source or the destination is
2261   // an instance type.
2262   if (flags & LIR_OpArrayCopy::type_check) {

2777         __ verify_klass_ptr(tmp);
2778 #endif
2779       } else {
2780         assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2781                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2782 
2783         __ ldr(tmp, mdo_addr);
2784         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2785 
2786         __ orr(tmp, tmp, TypeEntries::type_unknown);
2787         __ str(tmp, mdo_addr);
2788         // FIXME: Write barrier needed here?
2789       }
2790     }
2791 
2792     __ bind(next);
2793   }
2794   COMMENT("} emit_profile_type");
2795 }
2796 
2797 
2798 void LIR_Assembler::align_backward_branch_target() {
2799 }
2800 
2801 
2802 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2803   // tmp must be unused
2804   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2805 
2806   if (left->is_single_cpu()) {
2807     assert(dest->is_single_cpu(), "expect single result reg");
2808     __ negw(dest->as_register(), left->as_register());
2809   } else if (left->is_double_cpu()) {
2810     assert(dest->is_double_cpu(), "expect double result reg");
2811     __ neg(dest->as_register_lo(), left->as_register_lo());
2812   } else if (left->is_single_fpu()) {
2813     assert(dest->is_single_fpu(), "expect single float result reg");
2814     __ fnegs(dest->as_float_reg(), left->as_float_reg());
2815   } else {
2816     assert(left->is_double_fpu(), "expect double float operand reg");

2917 void LIR_Assembler::membar_loadload() {
2918   __ membar(Assembler::LoadLoad);
2919 }
2920 
2921 void LIR_Assembler::membar_storestore() {
2922   __ membar(MacroAssembler::StoreStore);
2923 }
2924 
2925 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
2926 
2927 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
2928 
2929 void LIR_Assembler::on_spin_wait() {
2930   __ spin_wait();
2931 }
2932 
2933 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
2934   __ mov(result_reg->as_register(), rthread);
2935 }
2936 
2937 
2938 void LIR_Assembler::peephole(LIR_List *lir) {
2939 #if 0
2940   if (tableswitch_count >= max_tableswitches)
2941     return;
2942 
2943   /*
2944     This finite-state automaton recognizes sequences of compare-and-
2945     branch instructions.  We will turn them into a tableswitch.  You
2946     could argue that C1 really shouldn't be doing this sort of
2947     optimization, but without it the code is really horrible.
2948   */
2949 
2950   enum { start_s, cmp1_s, beq_s, cmp_s } state;
2951   int first_key, last_key = -2147483648;
2952   int next_key = 0;
2953   int start_insn = -1;
2954   int last_insn = -1;
2955   Register reg = noreg;
2956   LIR_Opr reg_opr;

  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "asm/assembler.hpp"
  28 #include "c1/c1_CodeStubs.hpp"
  29 #include "c1/c1_Compilation.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_MacroAssembler.hpp"
  32 #include "c1/c1_Runtime1.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "ci/ciArrayKlass.hpp"
  35 #include "ci/ciInlineKlass.hpp"
  36 #include "ci/ciInstance.hpp"
  37 #include "code/compiledIC.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "gc/shared/gc_globals.hpp"
  40 #include "nativeInst_aarch64.hpp"
  41 #include "oops/objArrayKlass.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "runtime/frame.inline.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "runtime/stubRoutines.hpp"
  46 #include "utilities/powerOfTwo.hpp"
  47 #include "vmreg_aarch64.inline.hpp"
  48 
  49 
  50 #ifndef PRODUCT
  51 #define COMMENT(x)   do { __ block_comment(x); } while (0)
  52 #else
  53 #define COMMENT(x)
  54 #endif
  55 
  56 NEEDS_CLEANUP // remove these definitions?
  57 const Register SYNC_header = r0;   // synchronization header
  58 const Register SHIFT_count = r0;   // where count for shift operations must be
  59 
  60 #define __ _masm->
  61 
  62 

 415     if (LockingMode == LM_MONITOR) {
 416       __ b(*stub->entry());
 417     } else {
 418       __ unlock_object(r5, r4, r0, r6, *stub->entry());
 419     }
 420     __ bind(*stub->continuation());
 421   }
 422 
 423   if (compilation()->env()->dtrace_method_probes()) {
 424     __ mov(c_rarg0, rthread);
 425     __ mov_metadata(c_rarg1, method()->constant_encoding());
 426     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
 427   }
 428 
 429   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 430     __ mov(r0, r19);  // Restore the exception
 431   }
 432 
 433   // remove the activation and dispatch to the unwind handler
 434   __ block_comment("remove_frame and dispatch to the unwind handler");
 435   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 436   __ far_jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id)));
 437 
 438   // Emit the slow path assembly
 439   if (stub != nullptr) {
 440     stub->emit_code(this);
 441   }
 442 
 443   return offset;
 444 }
 445 
 446 
 447 int LIR_Assembler::emit_deopt_handler() {
 448   // generate code for deopt handler
 449   address handler_base = __ start_a_stub(deopt_handler_size());
 450   if (handler_base == nullptr) {
 451     // not enough space left for the handler
 452     bailout("deopt handler overflow");
 453     return -1;
 454   }
 455 

 459   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 460   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 461   __ end_a_stub();
 462 
 463   return offset;
 464 }
 465 
 466 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
 467   _masm->code_section()->relocate(adr, relocInfo::poll_type);
 468   int pc_offset = code_offset();
 469   flush_debug_info(pc_offset);
 470   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
 471   if (info->exception_handlers() != nullptr) {
 472     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
 473   }
 474 }
 475 
 476 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 477   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0");
 478 
 479   if (InlineTypeReturnedAsFields) {
 480     // Check if we are returning a non-null inline type and load its fields into registers
 481     ciType* return_type = compilation()->method()->return_type();
 482     if (return_type->is_inlinetype()) {
 483       ciInlineKlass* vk = return_type->as_inline_klass();
 484       if (vk->can_be_returned_as_fields()) {
 485         address unpack_handler = vk->unpack_handler();
 486         assert(unpack_handler != nullptr, "must be");
 487         __ far_call(RuntimeAddress(unpack_handler));
 488       }
 489     } else if (return_type->is_instance_klass() && (!return_type->is_loaded() || StressCallingConvention)) {
 490       Label skip;
 491       __ test_oop_is_not_inline_type(r0, rscratch2, skip);
 492 
 493       // Load fields from a buffered value with an inline-class-specific handler
 494       __ load_klass(rscratch1 /*dst*/, r0 /*src*/);
 495       __ ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 496       __ ldr(rscratch1, Address(rscratch1, InlineKlass::unpack_handler_offset()));
 497       // Unpack handler can be null if inline type is not scalarizable in returns
 498       __ cbz(rscratch1, skip);
 499       __ blr(rscratch1);
 500 
 501       __ bind(skip);
 502     }
 503     // At this point, r0 points to the value object (for interpreter or C1 caller).
 504     // The fields of the object are copied into registers (for C2 caller).
 505   }
 506 
 507   // Pop the stack before the safepoint code
 508   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 509 
 510   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 511     __ reserved_stack_check();
 512   }
 513 
 514   code_stub->set_safepoint_offset(__ offset());
 515   __ relocate(relocInfo::poll_return_type);
 516   __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
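         // With at_return/in_nmethod the poll compares sp against the
         // thread-local polling word (stack watermark) and takes the
         // C1SafepointPollStub entry when a safepoint or stack scan is pending.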
 517   __ ret(lr);
 518 }
 519 
 520 int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
 521   return (__ store_inline_type_fields_to_buf(vk, false));
 522 }
 523 
 524 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 525   guarantee(info != nullptr, "Shouldn't be null");
 526   __ get_polling_page(rscratch1, relocInfo::poll_type);
 527   add_debug_info_for_branch(info);  // This isn't just debug info:
 528                                     // it's the oop map
 529   __ read_polling_page(rscratch1, relocInfo::poll_type);
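         // This read faults while the page is armed; the signal handler then
         // uses the oop map recorded just above to process the safepoint.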
 530   return __ offset();
 531 }
 532 
 533 
 534 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
 535   if (from_reg == r31_sp)
 536     from_reg = sp;
 537   if (to_reg == r31_sp)
 538     to_reg = sp;
 539   __ mov(to_reg, from_reg);
 540 }
 541 
 542 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
 543 

 550   switch (c->type()) {
 551     case T_INT: {
 552       assert(patch_code == lir_patch_none, "no patching handled here");
 553       __ movw(dest->as_register(), c->as_jint());
 554       break;
 555     }
 556 
 557     case T_ADDRESS: {
 558       assert(patch_code == lir_patch_none, "no patching handled here");
 559       __ mov(dest->as_register(), c->as_jint());
 560       break;
 561     }
 562 
 563     case T_LONG: {
 564       assert(patch_code == lir_patch_none, "no patching handled here");
 565       __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
 566       break;
 567     }
 568 
 569     case T_OBJECT: {
 570         if (patch_code != lir_patch_none) {
 571           jobject2reg_with_patching(dest->as_register(), info);
 572         } else {
 573           jobject2reg(c->as_jobject(), dest->as_register());
 574         }
 575       break;
 576     }
 577 
 578     case T_METADATA: {
 579       if (patch_code != lir_patch_none) {
 580         klass2reg_with_patching(dest->as_register(), info);
 581       } else {
 582         __ mov_metadata(dest->as_register(), c->as_metadata());
 583       }
 584       break;
 585     }
 586 
 587     case T_FLOAT: {
 588       if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
 589         __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
 590       } else {
 591         __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
 592         __ ldrs(dest->as_float_reg(), Address(rscratch1));
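              // Only a small set of FP constants fits fmov's 8-bit immediate
              // encoding; anything else is materialized from the constant area.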
 593       }

 663   LIR_Const* c = src->as_constant_ptr();
 664   LIR_Address* to_addr = dest->as_address_ptr();
 665 
 666   void (Assembler::* insn)(Register Rt, const Address &adr);
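         // Only zero/null constants reach this path (see the asserts below),
         // so the chosen str variant only ever needs to store zr at the
         // appropriate width.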
 667 
 668   switch (type) {
 669   case T_ADDRESS:
 670     assert(c->as_jint() == 0, "should be");
 671     insn = &Assembler::str;
 672     break;
 673   case T_LONG:
 674     assert(c->as_jlong() == 0, "should be");
 675     insn = &Assembler::str;
 676     break;
 677   case T_INT:
 678     assert(c->as_jint() == 0, "should be");
 679     insn = &Assembler::strw;
 680     break;
 681   case T_OBJECT:
 682   case T_ARRAY:
 683     // Non-null case is not handled on aarch64 but handled on x86
 684     // FIXME: do we need to add it here?
 685     assert(c->as_jobject() == nullptr, "should be");
 686     if (UseCompressedOops && !wide) {
 687       insn = &Assembler::strw;
 688     } else {
 689       insn = &Assembler::str;
 690     }
 691     break;
 692   case T_CHAR:
 693   case T_SHORT:
 694     assert(c->as_jint() == 0, "should be");
 695     insn = &Assembler::strh;
 696     break;
 697   case T_BOOLEAN:
 698   case T_BYTE:
 699     assert(c->as_jint() == 0, "should be");
 700     insn = &Assembler::strb;
 701     break;
 702   default:
 703     ShouldNotReachHere();
 704     insn = &Assembler::str;  // unreachable

1012     case T_CHAR:
1013       __ ldrh(dest->as_register(), as_Address(from_addr));
1014       break;
1015     case T_SHORT:
1016       __ ldrsh(dest->as_register(), as_Address(from_addr));
1017       break;
1018 
1019     default:
1020       ShouldNotReachHere();
1021   }
1022 
1023   if (is_reference_type(type)) {
1024     if (UseCompressedOops && !wide) {
1025       __ decode_heap_oop(dest->as_register());
1026     }
1027 
1028     __ verify_oop(dest->as_register());
1029   }
1030 }
1031 
1032 void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
1033   assert(dst->is_cpu_register(), "must be");
1034   assert(dst->type() == src->type(), "must be");
1035 
1036   if (src->is_cpu_register()) {
1037     reg2reg(src, dst);
1038   } else if (src->is_stack()) {
1039     stack2reg(src, dst, dst->type());
1040   } else if (src->is_constant()) {
1041     const2reg(src, dst, lir_patch_none, nullptr);
1042   } else {
1043     ShouldNotReachHere();
1044   }
1045 }
1046 
1047 int LIR_Assembler::array_element_size(BasicType type) const {
1048   int elem_size = type2aelembytes(type);
1049   return exact_log2(elem_size);
1050 }
1051 
1052 
1053 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1054   switch (op->code()) {
1055   case lir_idiv:
1056   case lir_irem:
1057     arithmetic_idiv(op->code(),
1058                     op->in_opr1(),
1059                     op->in_opr2(),
1060                     op->in_opr3(),
1061                     op->result_opr(),
1062                     op->info());
1063     break;
1064   case lir_fmad:
1065     __ fmaddd(op->result_opr()->as_double_reg(),

1217     __ lea(rscratch1, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
1218     __ ldarb(rscratch1, rscratch1);
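          // ldarb is a load-acquire, so later accesses cannot be reordered
          // ahead of this init-state read; a klass that is not yet fully
          // initialized takes the slow path below.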
1219     __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1220     add_debug_info_for_null_check_here(op->stub()->info());
1221     __ br(Assembler::NE, *op->stub()->entry());
1222   }
1223   __ allocate_object(op->obj()->as_register(),
1224                      op->tmp1()->as_register(),
1225                      op->tmp2()->as_register(),
1226                      op->header_size(),
1227                      op->object_size(),
1228                      op->klass()->as_register(),
1229                      *op->stub()->entry());
1230   __ bind(*op->stub()->continuation());
1231 }
1232 
1233 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1234   Register len =  op->len()->as_register();
1235   __ uxtw(len, len);
1236 
1237   if (UseSlowPath || op->is_null_free() ||
1238       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1239       (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
1240     __ b(*op->stub()->entry());
1241   } else {
1242     Register tmp1 = op->tmp1()->as_register();
1243     Register tmp2 = op->tmp2()->as_register();
1244     Register tmp3 = op->tmp3()->as_register();
1245     if (len == tmp1) {
1246       tmp1 = tmp3;
1247     } else if (len == tmp2) {
1248       tmp2 = tmp3;
1249     } else if (len == tmp3) {
1250       // everything is ok
1251     } else {
1252       __ mov(tmp3, len);
1253     }
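          // len is now guaranteed to differ from tmp1 and tmp2 (tmp3 serves
          // as the spare), since allocate_array clobbers its temp registers.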
1254     __ allocate_array(op->obj()->as_register(),
1255                       len,
1256                       tmp1,
1257                       tmp2,

1329     assert(data != nullptr,                "need data for type check");
1330     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1331   }
1332   Label* success_target = success;
1333   Label* failure_target = failure;
1334 
1335   if (obj == k_RInfo) {
1336     k_RInfo = dst;
1337   } else if (obj == klass_RInfo) {
1338     klass_RInfo = dst;
1339   }
1340   if (k->is_loaded() && !UseCompressedClassPointers) {
1341     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1342   } else {
1343     Rtmp1 = op->tmp3()->as_register();
1344     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1345   }
1346 
1347   assert_different_registers(obj, k_RInfo, klass_RInfo);
1348 
1349   if (op->need_null_check()) {
1350     if (should_profile) {
1351       Register mdo  = klass_RInfo;
1352       __ mov_metadata(mdo, md->constant_encoding());
1353       Label not_null;
1354       __ cbnz(obj, not_null);
1355       // Object is null; update MDO and exit
1356       Address data_addr
1357         = __ form_address(rscratch2, mdo,
1358                           md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1359                           0);
1360       __ ldrb(rscratch1, data_addr);
1361       __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1362       __ strb(rscratch1, data_addr);
1363       __ b(*obj_is_null);
1364       __ bind(not_null);

1365 
1366       Label update_done;
1367       Register recv = k_RInfo;
1368       __ load_klass(recv, obj);
1369       type_profile_helper(mdo, md, data, recv, &update_done);
1370       Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1371       __ addptr(counter_addr, DataLayout::counter_increment);
1372 
1373       __ bind(update_done);
1374     } else {
1375       __ cbz(obj, *obj_is_null);
1376     }
1377   }
1378 
1379   if (!k->is_loaded()) {
1380     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1381   } else {
1382     __ mov_metadata(k_RInfo, k->constant_encoding());
1383   }
1384   __ verify_oop(obj);
1385 
1386   if (op->fast_check()) {
1387     // get object class
1388     // not a safepoint as obj null check happens earlier
1389     __ load_klass(rscratch1, obj);
 1390     __ cmp(rscratch1, k_RInfo);
1391 
1392     __ br(Assembler::NE, *failure_target);
1393     // successful cast, fall through to profile or jump
1394   } else {
1395     // get object class
1396     // not a safepoint as obj null check happens earlier

1514     __ bind(success);
1515     if (dst != obj) {
1516       __ mov(dst, obj);
1517     }
1518   } else if (code == lir_instanceof) {
1519     Register obj = op->object()->as_register();
1520     Register dst = op->result_opr()->as_register();
1521     Label success, failure, done;
1522     emit_typecheck_helper(op, &success, &failure, &failure);
1523     __ bind(failure);
1524     __ mov(dst, zr);
1525     __ b(done);
1526     __ bind(success);
1527     __ mov(dst, 1);
1528     __ bind(done);
1529   } else {
1530     ShouldNotReachHere();
1531   }
1532 }
1533 
1534 void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
1535   // We are loading/storing from/to an array that *may* be a flat array (the
1536   // declared type is Object[], abstract[], interface[] or VT.ref[]).
1537   // If this array is a flat array, take the slow path.
1538   __ test_flat_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1539   if (!op->value()->is_illegal()) {
1540     // The array is not a flat array, but it might be null-free. If we are storing
1541     // a null into a null-free array, take the slow path (which will throw NPE).
1542     Label skip;
1543     __ cbnz(op->value()->as_register(), skip);
1544     __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1545     __ bind(skip);
1546   }
1547 }
1548 
1549 void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
1550   // We are storing into an array that *may* be null-free (the declared type is
1551   // Object[], abstract[], interface[] or VT.ref[]).
1552   Label test_mark_word;
1553   Register tmp = op->tmp()->as_register();
1554   __ ldr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
1555   __ tst(tmp, markWord::unlocked_value);
1556   __ br(Assembler::NE, test_mark_word);
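         // The mark word is not unlocked, so it does not hold the array bit
         // pattern; reload the pattern from the klass prototype header.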
1557   __ load_prototype_header(tmp, op->array()->as_register());
1558   __ bind(test_mark_word);
1559   __ tst(tmp, markWord::null_free_array_bit_in_place);
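         // Flags are left for the caller: NE <=> the array is null-free.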
1560 }
1561 
1562 void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
1563   Label L_oops_equal;
1564   Label L_oops_not_equal;
1565   Label L_end;
1566 
1567   Register left  = op->left()->as_register();
1568   Register right = op->right()->as_register();
1569 
1570   __ cmp(left, right);
1571   __ br(Assembler::EQ, L_oops_equal);
1572 
1573   // (1) Null check -- if one of the operands is null, the other must not be null (because
 1574   //     the two references are not equal), so they are not substitutable.
1575   //     FIXME: do null check only if the operand is nullable
1576   {
1577     __ cbz(left, L_oops_not_equal);
1578     __ cbz(right, L_oops_not_equal);
1579   }
1580 
1581   ciKlass* left_klass = op->left_klass();
1582   ciKlass* right_klass = op->right_klass();
1583 
 1584   // (2) Inline type check -- if either of the operands is not an inline type,
 1585   //     they are not substitutable. We do this only if we are not sure that the
 1586   //     operands are inline types.
 1587   if ((left_klass == nullptr || right_klass == nullptr) || // The klass is still unloaded, or came from a Phi node.
1588       !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
1589     Register tmp1  = op->tmp1()->as_register();
1590     __ mov(tmp1, markWord::inline_type_pattern);
1591     __ ldr(rscratch1, Address(left, oopDesc::mark_offset_in_bytes()));
1592     __ andr(tmp1, tmp1, rscratch1);
1593     __ ldr(rscratch1, Address(right, oopDesc::mark_offset_in_bytes()));
1594     __ andr(tmp1, tmp1, rscratch1);
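         // tmp1 == inline_type_pattern only if both mark words have all of
         // the inline-type bits set.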
1595     __ cmp(tmp1, (u1)markWord::inline_type_pattern);
1596     __ br(Assembler::NE, L_oops_not_equal);
1597   }
1598 
1599   // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
1600   if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) {
1601     // No need to load klass -- the operands are statically known to be the same inline klass.
1602     __ b(*op->stub()->entry());
1603   } else {
1604     Register left_klass_op = op->left_klass_op()->as_register();
1605     Register right_klass_op = op->right_klass_op()->as_register();
1606 
1607     if (UseCompressedClassPointers) {
1608       __ ldrw(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
1609       __ ldrw(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
1610       __ cmpw(left_klass_op, right_klass_op);
1611     } else {
1612       __ ldr(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
1613       __ ldr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
1614       __ cmp(left_klass_op, right_klass_op);
1615     }
1616 
1617     __ br(Assembler::EQ, *op->stub()->entry()); // same klass -> do slow check
1618     // fall through to L_oops_not_equal
1619   }
1620 
1621   __ bind(L_oops_not_equal);
1622   move(op->not_equal_result(), op->result_opr());
1623   __ b(L_end);
1624 
1625   __ bind(L_oops_equal);
1626   move(op->equal_result(), op->result_opr());
1627   __ b(L_end);
1628 
1629   // We've returned from the stub. R0 contains 0x0 IFF the two
1630   // operands are not substitutable. (Don't compare against 0x1 in case the
1631   // C compiler is naughty)
1632   __ bind(*op->stub()->continuation());
1633   __ cbz(r0, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
1634   move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
1635   // fall-through
1636   __ bind(L_end);
1637 }
1638 
1639 
1640 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1641   __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1642   __ cset(rscratch1, Assembler::NE);
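         // rscratch1 is 0 if the CAS succeeded and 1 if it failed; the AnyAny
         // barrier below is a full barrier, making the exchange sequentially
         // consistent.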
1643   __ membar(__ AnyAny);
1644 }
1645 
1646 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1647   __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1648   __ cset(rscratch1, Assembler::NE);
1649   __ membar(__ AnyAny);
1650 }
1651 
1652 
1653 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1654   Register addr;
1655   if (op->addr()->is_register()) {
1656     addr = as_reg(op->addr());
1657   } else {
1658     assert(op->addr()->is_address(), "what else?");
1659     LIR_Address* addr_ptr = op->addr()->as_address_ptr();

2136     __ cmp(left->as_register_lo(), right->as_register_lo());
2137     __ mov(dst->as_register(), (uint64_t)-1L);
2138     __ br(Assembler::LT, done);
2139     __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
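          // csinc yields 0 for EQ and 1 otherwise, so dst ends up as
          // -1/0/1 for less/equal/greater.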
2140     __ bind(done);
2141   } else {
2142     ShouldNotReachHere();
2143   }
2144 }
2145 
2146 
2147 void LIR_Assembler::align_call(LIR_Code code) {  }
2148 
2149 
2150 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2151   address call = __ trampoline_call(Address(op->addr(), rtype));
2152   if (call == nullptr) {
2153     bailout("trampoline stub overflow");
2154     return;
2155   }
2156   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
2157   __ post_call_nop();
2158 }
2159 
2160 
2161 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2162   address call = __ ic_call(op->addr());
2163   if (call == nullptr) {
2164     bailout("trampoline stub overflow");
2165     return;
2166   }
2167   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
2168   __ post_call_nop();
2169 }
2170 
2171 void LIR_Assembler::emit_static_call_stub() {
2172   address call_pc = __ pc();
2173   address stub = __ start_a_stub(call_stub_size());
2174   if (stub == nullptr) {
2175     bailout("static call stub overflow");
2176     return;
2177   }
2178 
2179   int start = __ offset();
2180 
2181   __ relocate(static_stub_Relocation::spec(call_pc));
2182   __ emit_static_call_stub();
2183 
2184   assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
2185         <= call_stub_size(), "stub too big");
2186   __ end_a_stub();
2187 }

2310 
2311 
2312 void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2313   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2314   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2315   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2316   __ mov (rscratch1, c);
2317   __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2318 }
2319 
2320 
2321 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
2322   ShouldNotReachHere();
2323   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2324   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2325   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2326   __ lea(rscratch1, __ constant_oop_address(o));
2327   __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2328 }
2329 
2330 void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) {
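         // Destinations are checked for null-free arrays, sources for flat
         // arrays; either condition diverts to the slow path.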
2331   if (null_check) {
2332     __ cbz(obj, *slow_path->entry());
2333   }
2334   if (is_dest) {
2335     __ test_null_free_array_oop(obj, tmp, *slow_path->entry());
2336   } else {
2337     __ test_flat_array_oop(obj, tmp, *slow_path->entry());
2338   }
2339 }
2340 
 2341 // This code replaces a call to arraycopy; no exceptions may
 2342 // be thrown in this code; they must be thrown in the System.arraycopy
 2343 // activation frame. We could save some checks if this were not the case.
2344 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2345   ciArrayKlass* default_type = op->expected_type();
2346   Register src = op->src()->as_register();
2347   Register dst = op->dst()->as_register();
2348   Register src_pos = op->src_pos()->as_register();
2349   Register dst_pos = op->dst_pos()->as_register();
2350   Register length  = op->length()->as_register();
2351   Register tmp = op->tmp()->as_register();
2352 
2353   CodeStub* stub = op->stub();
2354   int flags = op->flags();
2355   BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
2356   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2357 
2358   if (flags & LIR_OpArrayCopy::always_slow_path) {
2359     __ b(*stub->entry());
2360     __ bind(*stub->continuation());
2361     return;
2362   }
2363 
2364   // if we don't know anything, just go through the generic arraycopy
2365   if (default_type == nullptr // || basic_type == T_OBJECT
2366       ) {
2367     Label done;
2368     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2369 
2370     // Save the arguments in case the generic arraycopy fails and we
2371     // have to fall back to the JNI stub
2372     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2373     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2374     __ str(src,              Address(sp, 4*BytesPerWord));
2375 
2376     address copyfunc_addr = StubRoutines::generic_arraycopy();
2377     assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2378 
2379     // The arguments are in java calling convention so we shift them
2380     // to C convention
2381     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2382     __ mov(c_rarg0, j_rarg0);
2383     assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);

2397     __ cbz(r0, *stub->continuation());
2398 
2399     // Reload values from the stack so they are where the stub
2400     // expects them.
2401     __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2402     __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2403     __ ldr(src,              Address(sp, 4*BytesPerWord));
2404 
 2405     // r0 is -1^K (i.e. ~K) where K == the number of elements partially copied
2406     __ eonw(rscratch1, r0, zr);
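          // eonw with zr is bitwise NOT, so rscratch1 = ~r0 = K.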
 2407     // adjust length down and src/dst pos up by partial copied count
2408     __ subw(length, length, rscratch1);
2409     __ addw(src_pos, src_pos, rscratch1);
2410     __ addw(dst_pos, dst_pos, rscratch1);
2411     __ b(*stub->entry());
2412 
2413     __ bind(*stub->continuation());
2414     return;
2415   }
2416 
2417   // Handle inline type arrays
2418   if (flags & LIR_OpArrayCopy::src_inlinetype_check) {
2419     arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
2420   }
2421 
2422   if (flags & LIR_OpArrayCopy::dst_inlinetype_check) {
2423     arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
2424   }
2425 
2426   assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2427 
2428   int elem_size = type2aelembytes(basic_type);
2429   int scale = exact_log2(elem_size);
2430 
2431   Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2432   Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2433 
2434   // test for null
2435   if (flags & LIR_OpArrayCopy::src_null_check) {
2436     __ cbz(src, *stub->entry());
2437   }
2438   if (flags & LIR_OpArrayCopy::dst_null_check) {
2439     __ cbz(dst, *stub->entry());
2440   }
2441 
2442   // If the compiler was not able to prove that exact type of the source or the destination
2443   // of the arraycopy is an array type, check at runtime if the source or the destination is
2444   // an instance type.
2445   if (flags & LIR_OpArrayCopy::type_check) {

2960         __ verify_klass_ptr(tmp);
2961 #endif
2962       } else {
2963         assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2964                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2965 
2966         __ ldr(tmp, mdo_addr);
2967         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2968 
2969         __ orr(tmp, tmp, TypeEntries::type_unknown);
2970         __ str(tmp, mdo_addr);
2971         // FIXME: Write barrier needed here?
2972       }
2973     }
2974 
2975     __ bind(next);
2976   }
2977   COMMENT("} emit_profile_type");
2978 }
2979 
2980 void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
2981   Register obj = op->obj()->as_register();
2982   Register tmp = op->tmp()->as_pointer_register();
2983   bool not_null = op->not_null();
2984   int flag = op->flag();
2985 
2986   Label not_inline_type;
2987   if (!not_null) {
2988     __ cbz(obj, not_inline_type);
2989   }
2990 
2991   __ test_oop_is_not_inline_type(obj, tmp, not_inline_type);
2992 
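         // Only reached for a non-null inline type: record it by OR-ing the
         // flag into the MDO's flag byte.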
2993   Address mdo_addr = as_Address(op->mdp()->as_address_ptr(), rscratch2);
2994   __ ldrb(rscratch1, mdo_addr);
2995   __ orr(rscratch1, rscratch1, flag);
2996   __ strb(rscratch1, mdo_addr);
2997 
2998   __ bind(not_inline_type);
2999 }
3000 
3001 void LIR_Assembler::align_backward_branch_target() {
3002 }
3003 
3004 
3005 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
3006   // tmp must be unused
3007   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
3008 
3009   if (left->is_single_cpu()) {
3010     assert(dest->is_single_cpu(), "expect single result reg");
3011     __ negw(dest->as_register(), left->as_register());
3012   } else if (left->is_double_cpu()) {
3013     assert(dest->is_double_cpu(), "expect double result reg");
3014     __ neg(dest->as_register_lo(), left->as_register_lo());
3015   } else if (left->is_single_fpu()) {
3016     assert(dest->is_single_fpu(), "expect single float result reg");
3017     __ fnegs(dest->as_float_reg(), left->as_float_reg());
3018   } else {
3019     assert(left->is_double_fpu(), "expect double float operand reg");

3120 void LIR_Assembler::membar_loadload() {
3121   __ membar(Assembler::LoadLoad);
3122 }
3123 
3124 void LIR_Assembler::membar_storestore() {
3125   __ membar(MacroAssembler::StoreStore);
3126 }
3127 
3128 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
3129 
3130 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
3131 
3132 void LIR_Assembler::on_spin_wait() {
3133   __ spin_wait();
3134 }
3135 
3136 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
3137   __ mov(result_reg->as_register(), rthread);
3138 }
3139 
3140 void LIR_Assembler::check_orig_pc() {
3141   __ ldr(rscratch2, frame_map()->address_for_orig_pc_addr());
3142   __ cmp(rscratch2, (u1)NULL_WORD);
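         // Sets flags for the caller: NE <=> an original pc has been saved
         // in the frame's orig_pc slot.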
3143 }
3144 
3145 void LIR_Assembler::peephole(LIR_List *lir) {
3146 #if 0
3147   if (tableswitch_count >= max_tableswitches)
3148     return;
3149 
3150   /*
3151     This finite-state automaton recognizes sequences of compare-and-
3152     branch instructions.  We will turn them into a tableswitch.  You
3153     could argue that C1 really shouldn't be doing this sort of
3154     optimization, but without it the code is really horrible.
3155   */
3156 
3157   enum { start_s, cmp1_s, beq_s, cmp_s } state;
3158   int first_key, last_key = -2147483648;
3159   int next_key = 0;
3160   int start_insn = -1;
3161   int last_insn = -1;
3162   Register reg = noreg;
3163   LIR_Opr reg_opr;