1 /*
   2  * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
   4  * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "asm/assembler.hpp"
  28 #include "asm/macroAssembler.inline.hpp"
  29 #include "c1/c1_CodeStubs.hpp"
  30 #include "c1/c1_Compilation.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_MacroAssembler.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArrayKlass.hpp"
  36 #include "ci/ciInstance.hpp"
  37 #include "code/compiledIC.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "nativeInst_riscv.hpp"
  40 #include "oops/objArrayKlass.hpp"
  41 #include "runtime/frame.inline.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "utilities/powerOfTwo.hpp"
  44 #include "vmreg_riscv.inline.hpp"
  45 
  46 #ifndef PRODUCT
  47 #define COMMENT(x)   do { __ block_comment(x); } while (0)
  48 #else
  49 #define COMMENT(x)
  50 #endif
  51 
NEEDS_CLEANUP // remove these definitions?
// Fixed register assignments inherited from the shared C1 code; only used by
// platform code that still refers to them by these names.
const Register SYNC_header = x10;   // synchronization header
const Register SHIFT_count = x10;   // where count for shift operations must be
  55 
  56 #define __ _masm->
  57 
  58 static void select_different_registers(Register preserve,
  59                                        Register extra,
  60                                        Register &tmp1,
  61                                        Register &tmp2,
  62                                        Register &tmp3) {
  63   if (tmp1 == preserve) {
  64     assert_different_registers(tmp1, tmp2, tmp3, extra);
  65     tmp1 = extra;
  66   } else if (tmp2 == preserve) {
  67     assert_different_registers(tmp1, tmp2, tmp3, extra);
  68     tmp2 = extra;
  69   } else if (tmp3 == preserve) {
  70     assert_different_registers(tmp1, tmp2, tmp3, extra);
  71     tmp3 = extra;
  72   }
  73   assert_different_registers(preserve, tmp1, tmp2, tmp3);
  74 }
  75 
// Not used on this platform; shared code should never reach it.
bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }
  77 
// Emit a class-initialization barrier for 'method': load the holder klass
// into t1 and branch past the barrier on the fast path; otherwise jump to
// the shared "handle wrong method" stub so the call is re-resolved.
void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  Label L_skip_barrier;

  __ mov_metadata(t1, method->holder()->constant_encoding());
  __ clinit_barrier(t1, t0, &L_skip_barrier /* L_fast_path */);
  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
  __ bind(L_skip_barrier);
}
  89 
// Operand holding the receiver (frame-map defined, x12 per osr_entry comment below).
LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}
  93 
// The OSR buffer pointer arrives in the same register as the receiver,
// re-typed as a pointer operand.
LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}
  97 
// Not implemented on this platform.
void LIR_Assembler::breakpoint() { Unimplemented(); }
  99 
// Not implemented on this platform.
void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }
 101 
// Not implemented on this platform.
void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }
 103 
 104 static jlong as_long(LIR_Opr data) {
 105   jlong result;
 106   switch (data->type()) {
 107     case T_INT:
 108       result = (data->as_jint());
 109       break;
 110     case T_LONG:
 111       result = (data->as_jlong());
 112       break;
 113     default:
 114       ShouldNotReachHere();
 115       result = 0;  // unreachable
 116   }
 117   return result;
 118 }
 119 
// Turn a LIR_Address into a machine Address.  When base+index(+scale) cannot
// be expressed as a single base register plus immediate displacement, the
// effective address is first computed into 'tmp'.
Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  if (addr->base()->is_illegal()) {
    assert(addr->index()->is_illegal(), "must be illegal too");
    // Absolute address: materialize the displacement in tmp.
    __ movptr(tmp, (address)addr->disp());
    return Address(tmp, 0);
  }

  Register base = addr->base()->as_pointer_register();
  LIR_Opr index_opr = addr->index();

  if (index_opr->is_illegal()) {
    // Simple base + displacement.
    return Address(base, addr->disp());
  }

  int scale = addr->scale();
  if (index_opr->is_cpu_register()) {
    Register index;
    if (index_opr->is_single_cpu()) {
      index = index_opr->as_register();
    } else {
      index = index_opr->as_register_lo();
    }
    // tmp = base + (index << scale); displacement is applied by the caller's
    // access through the returned Address.
    if (scale != 0) {
      __ shadd(tmp, index, base, tmp, scale);
    } else {
      __ add(tmp, base, index);
    }
    return Address(tmp, addr->disp());
  } else if (index_opr->is_constant()) {
    // Fold a constant index into the displacement.
    intptr_t addr_offset = (((intptr_t)index_opr->as_constant_ptr()->as_jint()) << scale) + addr->disp();
    return Address(base, addr_offset);
  }

  Unimplemented();
  return Address();
}
 156 
// 64-bit port: there is no separate high-word address.
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  ShouldNotReachHere();
  return Address();
}
 161 
// Default variant: use t0 as the scratch register for address computation.
Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, t0);
}
 165 
// 64-bit port: the low-word address is just the address itself.
Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}
 169 
 170 // Ensure a valid Address (base + offset) to a stack-slot. If stack access is
 171 // not encodable as a base + (immediate) offset, generate an explicit address
 172 // calculation to hold the address in t0.
Address LIR_Assembler::stack_slot_address(int index, uint size, int adjust) {
  precond(size == 4 || size == 8);
  Address addr = frame_map()->address_for_slot(index, adjust);
  // NOTE(review): unlike the header comment above suggests, this body never
  // materializes the address in t0 — the preconditions guarantee the slot is
  // already encodable as sp + positive offset.
  precond(addr.getMode() == Address::base_plus_offset);
  precond(addr.base() == sp);
  precond(addr.offset() > 0);
  // Offsets must be naturally aligned for the access size.
  uint mask = size - 1;
  assert((addr.offset() & mask) == 0, "scaled offsets only");

  return addr;
}
 184 
// Emit the OSR (on-stack replacement) entry point: build the compiled frame
// and copy the interpreter's monitors out of the OSR buffer into the
// compiled activation's monitor slots.
void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  guarantee(osr_entry != nullptr, "null osr_entry!");
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  // x12: osr buffer
  //

  //build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame so in the osr buffer
  // so first slot in the local array is the last local from the interpreter
  // and last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
  // in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  //   x12: pointer to osr buffer
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  {
    assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    // Offset of monitor 0's entry; monitors are laid out after the locals.
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ld(t0, Address(OSR_buf, slot_offset + 1 * BytesPerWord));
        __ bnez(t0, L);
        __ stop("locked object is null");
        __ bind(L);
      }
#endif // ASSERT
      // Copy lock word then object oop via x9 into this frame's monitor slots.
      __ ld(x9, Address(OSR_buf, slot_offset + 0));
      __ sd(x9, frame_map()->address_for_monitor_lock(i));
      __ ld(x9, Address(OSR_buf, slot_offset + 1 * BytesPerWord));
      __ sd(x9, frame_map()->address_for_monitor_object(i));
    }
  }
}
 249 
 250 // inline cache check; done before the frame is built.
// Emit the inline-cache check (before the frame is built); returns its offset.
int LIR_Assembler::check_icache() {
  return __ ic_check(CodeEntryAlignment);
}
 254 
 255 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
 256   if (o == nullptr) {
 257     __ mv(reg, zr);
 258   } else {
 259     __ movoop(reg, o);
 260   }
 261 }
 262 
// Patchable oop constants are not supported here; deoptimize to the
// interpreter instead, which will resolve the constant.
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  deoptimize_trap(info);
}
 266 
 267 // This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!

  return in_bytes(frame_map()->framesize_in_bytes());
}
 273 
// Emit the out-of-line exception handler stub; returns its code offset,
// or -1 after bailout when the stub section is full.
int LIR_Assembler::emit_exception_handler() {
  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in x10, and x13
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(x10);

  // search an exception handler (x10: exception oop, x13: throwing pc)
  __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_handle_exception_from_callee_id)));
  // The runtime call does not return here.
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}
 300 
 301 // Emit the code to remove the frame from the stack in the exception
 302 // unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif // PRODUCT

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ ld(x10, Address(xthread, JavaThread::exception_oop_offset()));
  __ sd(zr, Address(xthread, JavaThread::exception_oop_offset()));
  __ sd(zr, Address(xthread, JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(x10);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mv(x9, x10);   // Preserve the exception (x10) across unlock/dtrace calls
  }

  // Perform needed unlocking
  MonitorExitStub* stub = nullptr;
  if (method()->is_synchronized()) {
    // Monitor slot 0 holds the method lock of a synchronized method;
    // its address goes to x10 (r10_opr) for the slow-path stub.
    monitor_address(0, FrameMap::r10_opr);
    stub = new MonitorExitStub(FrameMap::r10_opr, 0);
    __ unlock_object(x15, x14, x10, x16, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ mv(c_rarg0, xthread);
    __ mov_metadata(c_rarg1, method()->constant_encoding());
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mv(x10, x9);   // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ block_comment("remove_frame and dispatch to the unwind handler");
  __ remove_frame(initial_frame_size_in_bytes());
  __ far_jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != nullptr) {
    stub->emit_code(this);
  }

  return offset;
}
 354 
// Emit the deopt handler stub.  Layout: a far_call to the deopt blob's
// unpack entry, followed by the handler entry point itself, which is a
// backward jump to that call.  Returns the offset of the entry point.
int LIR_Assembler::emit_deopt_handler() {
  // generate code for exception handler
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

  Label start;
  __ bind(start);

  __ far_call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  // Entry point is after the call; it jumps back to execute the call.
  int entry_offset = __ offset();
  __ j(start);

  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  // The post-call NOP check may read a few bytes starting at the entry;
  // make sure they exist inside this stub.
  assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
         "out of bounds read in post-call NOP check");
  __ end_a_stub();

  return entry_offset;
}
 381 
// Emit a method return: tear down the frame, poll for a safepoint at the
// return point, then ret.  'result' (if any single-word value) is in x10.
void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == x10, "word returns are in x10");

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // Record the poll pc so the stub can describe this site, then poll.
  code_stub->set_safepoint_offset(__ offset());
  __ relocate(relocInfo::poll_return_type);
  __ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
  __ ret();
}
 397 
// Emit an explicit safepoint poll (load from the polling page); returns the
// code offset just past the poll.
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  guarantee(info != nullptr, "Shouldn't be null");
  __ get_polling_page(t0, relocInfo::poll_type);
  add_debug_info_for_branch(info);  // This isn't just debug info:
                                    // it's the oop map
  __ read_polling_page(t0, 0, relocInfo::poll_type);
  return __ offset();
}
 406 
// Register-to-register move.
void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  __ mv(to_reg, from_reg);
}
 410 
// Not implemented on this platform.
void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
 412 
// Materialize a constant operand into a register (integer, pointer, oop,
// metadata, or FP).  Only oop/metadata constants support patching.
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  address const_addr = nullptr;
  jfloat fconst;
  jdouble dconst;

  switch (c->type()) {
    case T_INT:
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mv(dest->as_register(), c->as_jint());
      break;

    case T_ADDRESS:
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mv(dest->as_register(), c->as_jint());
      break;

    case T_LONG:
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mv(dest->as_register_lo(), (intptr_t)c->as_jlong());
      break;

    case T_OBJECT:
    case T_ARRAY:
      // Oop constants need either direct materialization or a patch site.
      if (patch_code == lir_patch_none) {
        jobject2reg(c->as_jobject(), dest->as_register());
      } else {
        jobject2reg_with_patching(dest->as_register(), info);
      }
      break;

    case T_METADATA:
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;

    case T_FLOAT:
      fconst = c->as_jfloat();
      // Prefer the Zfa fli.s immediate form when the value is encodable;
      // otherwise load from the constant table.
      if (MacroAssembler::can_fp_imm_load(fconst)) {
        __ fli_s(dest->as_float_reg(), fconst);
      } else {
        const_addr = float_constant(fconst);
        assert(const_addr != nullptr, "must create float constant in the constant table");
        __ flw(dest->as_float_reg(), InternalAddress(const_addr));
      }
      break;

    case T_DOUBLE:
      dconst = c->as_jdouble();
      // Same strategy as T_FLOAT, using fli.d / the constant table.
      if (MacroAssembler::can_dp_imm_load(dconst)) {
        __ fli_d(dest->as_double_reg(), dconst);
      } else {
        const_addr = double_constant(c->as_jdouble());
        assert(const_addr != nullptr, "must create double constant in the constant table");
        __ fld(dest->as_double_reg(), InternalAddress(const_addr));
      }
      break;

    default:
      ShouldNotReachHere();
  }
}
 480 
 481 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 482   assert(src->is_constant(), "should not call otherwise");
 483   assert(dest->is_stack(), "should not call otherwise");
 484   LIR_Const* c = src->as_constant_ptr();
 485   switch (c->type()) {
 486     case T_OBJECT:
 487       if (c->as_jobject() == nullptr) {
 488         __ sd(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
 489       } else {
 490         const2reg(src, FrameMap::t1_opr, lir_patch_none, nullptr);
 491         reg2stack(FrameMap::t1_opr, dest, c->type());
 492       }
 493       break;
 494     case T_ADDRESS:   // fall through
 495       const2reg(src, FrameMap::t1_opr, lir_patch_none, nullptr);
 496       reg2stack(FrameMap::t1_opr, dest, c->type());
 497     case T_INT:       // fall through
 498     case T_FLOAT:
 499       if (c->as_jint_bits() == 0) {
 500         __ sw(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
 501       } else {
 502         __ mv(t1, c->as_jint_bits());
 503         __ sw(t1, frame_map()->address_for_slot(dest->single_stack_ix()));
 504       }
 505       break;
 506     case T_LONG:      // fall through
 507     case T_DOUBLE:
 508       if (c->as_jlong_bits() == 0) {
 509         __ sd(zr, frame_map()->address_for_slot(dest->double_stack_ix(),
 510                                                 lo_word_offset_in_bytes));
 511       } else {
 512         __ mv(t1, (intptr_t)c->as_jlong_bits());
 513         __ sd(t1, frame_map()->address_for_slot(dest->double_stack_ix(),
 514                                                 lo_word_offset_in_bytes));
 515       }
 516       break;
 517     default:
 518       ShouldNotReachHere();
 519   }
 520 }
 521 
// Store a zero constant to memory.  All supported cases assert the constant
// is zero, so the store is always done from the zero register; only the
// store width (sb/sh/sw/sd) depends on 'type'.
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* to_addr = dest->as_address_ptr();
  // Pointer-to-member selecting the correctly sized store instruction.
  void (MacroAssembler::* insn)(Register Rt, const Address &adr, Register temp);
  switch (type) {
    case T_ADDRESS:
      assert(c->as_jint() == 0, "should be");
      insn = &MacroAssembler::sd; break;
    case T_LONG:
      assert(c->as_jlong() == 0, "should be");
      insn = &MacroAssembler::sd; break;
    case T_DOUBLE:
      assert(c->as_jdouble() == 0.0, "should be");
      insn = &MacroAssembler::sd; break;
    case T_INT:
      assert(c->as_jint() == 0, "should be");
      insn = &MacroAssembler::sw; break;
    case T_FLOAT:
      assert(c->as_jfloat() == 0.0f, "should be");
      insn = &MacroAssembler::sw; break;
    case T_OBJECT:    // fall through
    case T_ARRAY:
      assert(c->as_jobject() == nullptr, "should be");
      // Narrow-oop stores are 32 bits unless a full (wide) store is requested.
      if (UseCompressedOops && !wide) {
        insn = &MacroAssembler::sw;
      } else {
        insn = &MacroAssembler::sd;
      }
      break;
    case T_CHAR:      // fall through
    case T_SHORT:
      assert(c->as_jint() == 0, "should be");
      insn = &MacroAssembler::sh;
      break;
    case T_BOOLEAN:   // fall through
    case T_BYTE:
      assert(c->as_jint() == 0, "should be");
      insn = &MacroAssembler::sb; break;
    default:
      ShouldNotReachHere();
      insn = &MacroAssembler::sd;  // unreachable
  }
  if (info != nullptr) {
    // The store below may take an implicit null check at this pc.
    add_debug_info_for_null_check_here(info);
  }
  // Store zero; t0 is the scratch register for address formation.
  (_masm->*insn)(zr, as_Address(to_addr), t0);
}
 571 
// Register-to-register move between LIR operands, covering GPR single/double
// word and FPU single/double cases.
void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());
  } else if (dest->is_double_cpu()) {
    if (is_reference_type(src->type())) {
      // Surprising to get here but it's possible
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
    // On a 64-bit port lo and hi halves are the same register.
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);
  } else if (dest->is_single_fpu()) {
    assert(src->is_single_fpu(), "expect single fpu");
    __ fmv_s(dest->as_float_reg(), src->as_float_reg());
  } else if (dest->is_double_fpu()) {
    assert(src->is_double_fpu(), "expect double fpu");
    __ fmv_d(dest->as_double_reg(), src->as_double_reg());
  } else {
    ShouldNotReachHere();
  }
}
 612 
// Spill a register operand to its stack slot, using the store width implied
// by 'type' (64-bit for references, metadata, double, address; 32-bit else).
void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  precond(src->is_register() && dest->is_stack());

  uint const c_sz32 = sizeof(uint32_t);
  uint const c_sz64 = sizeof(uint64_t);

  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  if (src->is_single_cpu()) {
    int index = dest->single_stack_ix();
    if (is_reference_type(type)) {
      __ sd(src->as_register(), stack_slot_address(index, c_sz64));
      __ verify_oop(src->as_register());
    } else if (type == T_METADATA || type == T_DOUBLE || type == T_ADDRESS) {
      __ sd(src->as_register(), stack_slot_address(index, c_sz64));
    } else {
      __ sw(src->as_register(), stack_slot_address(index, c_sz32));
    }
  } else if (src->is_double_cpu()) {
    int index = dest->double_stack_ix();
    // Two-word operand occupies one 64-bit slot addressed at its low word.
    Address dest_addr_LO = stack_slot_address(index, c_sz64, lo_word_offset_in_bytes);
    __ sd(src->as_register_lo(), dest_addr_LO);
  } else if (src->is_single_fpu()) {
    int index = dest->single_stack_ix();
    __ fsw(src->as_float_reg(), stack_slot_address(index, c_sz32));
  } else if (src->is_double_fpu()) {
    int index = dest->double_stack_ix();
    __ fsd(src->as_double_reg(), stack_slot_address(index, c_sz64));
  } else {
    ShouldNotReachHere();
  }
}
 645 
// Store a register operand to memory.  References may be stored compressed;
// patched stores deoptimize instead of emitting a patch site.
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
  LIR_Address* to_addr = dest->as_address_ptr();
  // t0 was used as tmp reg in as_Address, so we use t1 as compressed_src
  Register compressed_src = t1;

  if (patch_code != lir_patch_none) {
    deoptimize_trap(info);
    return;
  }

  if (is_reference_type(type)) {
    __ verify_oop(src->as_register());

    if (UseCompressedOops && !wide) {
      // Compress into t1; the original oop register stays intact.
      __ encode_heap_oop(compressed_src, src->as_register());
    } else {
      compressed_src = src->as_register();
    }
  }

  // pc of the first instruction below; used for the implicit null check info.
  int null_check_here = code_offset();

  switch (type) {
    case T_FLOAT:
      __ fsw(src->as_float_reg(), as_Address(to_addr));
      break;

    case T_DOUBLE:
      __ fsd(src->as_double_reg(), as_Address(to_addr));
      break;

    case T_ARRAY:      // fall through
    case T_OBJECT:
      if (UseCompressedOops && !wide) {
        __ sw(compressed_src, as_Address(to_addr));
      } else {
        __ sd(compressed_src, as_Address(to_addr));
      }
      break;
    case T_METADATA:
      // We get here to store a method pointer to the stack to pass to
      // a dtrace runtime call. This can't work on 64 bit with
      // compressed klass ptrs: T_METADATA can be compressed klass
      // ptr or a 64 bit method pointer.
      ShouldNotReachHere();
      __ sd(src->as_register(), as_Address(to_addr));
      break;
    case T_ADDRESS:
      __ sd(src->as_register(), as_Address(to_addr));
      break;
    case T_INT:
      __ sw(src->as_register(), as_Address(to_addr));
      break;
    case T_LONG:
      __ sd(src->as_register_lo(), as_Address(to_addr));
      break;
    case T_BYTE:    // fall through
    case T_BOOLEAN:
      __ sb(src->as_register(), as_Address(to_addr));
      break;
    case T_CHAR:    // fall through
    case T_SHORT:
      __ sh(src->as_register(), as_Address(to_addr));
      break;
    default:
      ShouldNotReachHere();
  }

  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}
 718 
// Reload a stack slot into a register, choosing the load width and
// signedness from 'type' (lw for T_INT, lwu for other 32-bit values).
void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  precond(src->is_stack() && dest->is_register());

  uint const c_sz32 = sizeof(uint32_t);
  uint const c_sz64 = sizeof(uint64_t);

  if (dest->is_single_cpu()) {
    int index = src->single_stack_ix();
    if (type == T_INT) {
      __ lw(dest->as_register(), stack_slot_address(index, c_sz32));
    } else if (is_reference_type(type)) {
      __ ld(dest->as_register(), stack_slot_address(index, c_sz64));
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ ld(dest->as_register(), stack_slot_address(index, c_sz64));
    } else {
      __ lwu(dest->as_register(), stack_slot_address(index, c_sz32));
    }
  } else if (dest->is_double_cpu()) {
    int index = src->double_stack_ix();
    // Two-word operand lives in one 64-bit slot addressed at its low word.
    Address src_addr_LO = stack_slot_address(index, c_sz64, lo_word_offset_in_bytes);
    __ ld(dest->as_register_lo(), src_addr_LO);
  } else if (dest->is_single_fpu()) {
    int index = src->single_stack_ix();
    __ flw(dest->as_float_reg(), stack_slot_address(index, c_sz32));
  } else if (dest->is_double_fpu()) {
    int index = src->double_stack_ix();
    __ fld(dest->as_double_reg(), stack_slot_address(index, c_sz64));
  } else {
    ShouldNotReachHere();
  }
}
 751 
// Patchable klass constants are not supported here; deoptimize to the
// interpreter, which resolves the klass.
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  deoptimize_trap(info);
}
 755 
 756 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
 757   LIR_Opr temp;
 758   if (type == T_LONG || type == T_DOUBLE) {
 759     temp = FrameMap::t1_long_opr;
 760   } else {
 761     temp = FrameMap::t1_opr;
 762   }
 763 
 764   stack2reg(src, temp, src->type());
 765   reg2stack(temp, dest, dest->type());
 766 }
 767 
 768 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
 769   assert(src->is_address(), "should not call otherwise");
 770   assert(dest->is_register(), "should not call otherwise");
 771 
 772   LIR_Address* addr = src->as_address_ptr();
 773   LIR_Address* from_addr = src->as_address_ptr();
 774 
 775   if (addr->base()->type() == T_OBJECT) {
 776     __ verify_oop(addr->base()->as_pointer_register());
 777   }
 778 
 779   if (patch_code != lir_patch_none) {
 780     deoptimize_trap(info);
 781     return;
 782   }
 783 
 784   if (info != nullptr) {
 785     add_debug_info_for_null_check_here(info);
 786   }
 787 
 788   int null_check_here = code_offset();
 789   switch (type) {
 790     case T_FLOAT:
 791       __ flw(dest->as_float_reg(), as_Address(from_addr));
 792       break;
 793     case T_DOUBLE:
 794       __ fld(dest->as_double_reg(), as_Address(from_addr));
 795       break;
 796     case T_ARRAY:     // fall through
 797     case T_OBJECT:
 798       if (UseCompressedOops && !wide) {
 799         __ lwu(dest->as_register(), as_Address(from_addr));
 800       } else {
 801         __ ld(dest->as_register(), as_Address(from_addr));
 802       }
 803       break;
 804     case T_METADATA:
 805       // We get here to store a method pointer to the stack to pass to
 806       // a dtrace runtime call. This can't work on 64 bit with
 807       // compressed klass ptrs: T_METADATA can be a compressed klass
 808       // ptr or a 64 bit method pointer.
 809       ShouldNotReachHere();
 810       __ ld(dest->as_register(), as_Address(from_addr));
 811       break;
 812     case T_ADDRESS:
 813       __ ld(dest->as_register(), as_Address(from_addr));
 814       break;
 815     case T_INT:
 816       __ lw(dest->as_register(), as_Address(from_addr));
 817       break;
 818     case T_LONG:
 819       __ ld(dest->as_register_lo(), as_Address_lo(from_addr));
 820       break;
 821     case T_BYTE:
 822       __ lb(dest->as_register(), as_Address(from_addr));
 823       break;
 824     case T_BOOLEAN:
 825       __ lbu(dest->as_register(), as_Address(from_addr));
 826       break;
 827     case T_CHAR:
 828       __ lhu(dest->as_register(), as_Address(from_addr));
 829       break;
 830     case T_SHORT:
 831       __ lh(dest->as_register(), as_Address(from_addr));
 832       break;
 833     default:
 834       ShouldNotReachHere();
 835   }
 836 
 837   if (is_reference_type(type)) {
 838     if (UseCompressedOops && !wide) {
 839       __ decode_heap_oop(dest->as_register());
 840     }
 841 
 842     __ verify_oop(dest->as_register());
 843   }
 844 }
 845 
 846 void LIR_Assembler::emit_op3(LIR_Op3* op) {
 847   switch (op->code()) {
 848     case lir_idiv: // fall through
 849     case lir_irem:
 850       arithmetic_idiv(op->code(),
 851                       op->in_opr1(),
 852                       op->in_opr2(),
 853                       op->in_opr3(),
 854                       op->result_opr(),
 855                       op->info());
 856       break;
 857     case lir_fmad:
 858       __ fmadd_d(op->result_opr()->as_double_reg(),
 859                  op->in_opr1()->as_double_reg(),
 860                  op->in_opr2()->as_double_reg(),
 861                  op->in_opr3()->as_double_reg());
 862       break;
 863     case lir_fmaf:
 864       __ fmadd_s(op->result_opr()->as_float_reg(),
 865                  op->in_opr1()->as_float_reg(),
 866                  op->in_opr2()->as_float_reg(),
 867                  op->in_opr3()->as_float_reg());
 868       break;
 869     default:
 870       ShouldNotReachHere();
 871   }
 872 }
 873 
 874 // Consider using cmov (Zicond)
 875 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
 876                           LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
 877   Label label;
 878 
 879   emit_branch(condition, cmp_opr1, cmp_opr2, label, /* is_far */ false,
 880               /* is_unordered */ (condition == lir_cond_greaterEqual || condition == lir_cond_greater) ? false : true);
 881 
 882   Label done;
 883   move_op(opr2, result, type, lir_patch_none, nullptr,
 884           false);  // wide
 885   __ j(done);
 886   __ bind(label);
 887   move_op(opr1, result, type, lir_patch_none, nullptr,
 888           false);  // wide
 889   __ bind(done);
 890 }
 891 
 892 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
 893   LIR_Condition condition = op->cond();
 894   if (condition == lir_cond_always) {
 895     if (op->info() != nullptr) {
 896       add_debug_info_for_branch(op->info());
 897     }
 898   } else {
 899     assert(op->in_opr1() != LIR_OprFact::illegalOpr && op->in_opr2() != LIR_OprFact::illegalOpr, "conditional branches must have legal operands");
 900   }
 901   bool is_unordered = (op->ublock() == op->block());
 902   emit_branch(condition, op->in_opr1(), op->in_opr2(), *op->label(), /* is_far */ true, is_unordered);
 903 }
 904 
 905 void LIR_Assembler::emit_branch(LIR_Condition cmp_flag, LIR_Opr cmp1, LIR_Opr cmp2, Label& label,
 906                                 bool is_far, bool is_unordered) {
 907 
 908   if (cmp_flag == lir_cond_always) {
 909     __ j(label);
 910     return;
 911   }
 912 
 913   if (cmp1->is_cpu_register()) {
 914     Register reg1 = as_reg(cmp1);
 915     if (cmp2->is_cpu_register()) {
 916       Register reg2 = as_reg(cmp2);
 917       __ c1_cmp_branch(cmp_flag, reg1, reg2, label, cmp1->type(), is_far);
 918     } else if (cmp2->is_constant()) {
 919       const2reg_helper(cmp2);
 920       __ c1_cmp_branch(cmp_flag, reg1, t0, label, cmp2->type(), is_far);
 921     } else {
 922       ShouldNotReachHere();
 923     }
 924   } else if (cmp1->is_single_fpu()) {
 925     assert(cmp2->is_single_fpu(), "expect single float register");
 926     __ c1_float_cmp_branch(cmp_flag, cmp1->as_float_reg(), cmp2->as_float_reg(), label, is_far, is_unordered);
 927   } else if (cmp1->is_double_fpu()) {
 928     assert(cmp2->is_double_fpu(), "expect double float register");
 929     __ c1_float_cmp_branch(cmp_flag | C1_MacroAssembler::c1_double_branch_mask,
 930                            cmp1->as_double_reg(), cmp2->as_double_reg(), label, is_far, is_unordered);
 931   } else {
 932     ShouldNotReachHere();
 933   }
 934 }
 935 
 936 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
 937   LIR_Opr src  = op->in_opr();
 938   LIR_Opr dest = op->result_opr();
 939 
 940   switch (op->bytecode()) {
 941     case Bytecodes::_i2f:
 942       __ fcvt_s_w(dest->as_float_reg(), src->as_register()); break;
 943     case Bytecodes::_i2d:
 944       __ fcvt_d_w(dest->as_double_reg(), src->as_register()); break;
 945     case Bytecodes::_l2d:
 946       __ fcvt_d_l(dest->as_double_reg(), src->as_register_lo()); break;
 947     case Bytecodes::_l2f:
 948       __ fcvt_s_l(dest->as_float_reg(), src->as_register_lo()); break;
 949     case Bytecodes::_f2d:
 950       __ fcvt_d_s(dest->as_double_reg(), src->as_float_reg()); break;
 951     case Bytecodes::_d2f:
 952       __ fcvt_s_d(dest->as_float_reg(), src->as_double_reg()); break;
 953     case Bytecodes::_i2c:
 954       __ zext(dest->as_register(), src->as_register(), 16); break;
 955     case Bytecodes::_i2l:
 956       __ sext(dest->as_register_lo(), src->as_register(), 32); break;
 957     case Bytecodes::_i2s:
 958       __ sext(dest->as_register(), src->as_register(), 16); break;
 959     case Bytecodes::_i2b:
 960       __ sext(dest->as_register(), src->as_register(), 8); break;
 961     case Bytecodes::_l2i:
 962       __ sext(dest->as_register(), src->as_register_lo(), 32); break;
 963     case Bytecodes::_d2l:
 964       __ fcvt_l_d_safe(dest->as_register_lo(), src->as_double_reg()); break;
 965     case Bytecodes::_f2i:
 966       __ fcvt_w_s_safe(dest->as_register(), src->as_float_reg()); break;
 967     case Bytecodes::_f2l:
 968       __ fcvt_l_s_safe(dest->as_register_lo(), src->as_float_reg()); break;
 969     case Bytecodes::_d2i:
 970       __ fcvt_w_d_safe(dest->as_register(), src->as_double_reg()); break;
 971     default:
 972       ShouldNotReachHere();
 973   }
 974 }
 975 
// Emit the fast path of an instance allocation. Falls back to the slow-path
// stub on allocation failure or when the class still needs initialization.
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    // Load the class's init state; anything other than fully_initialized
    // (including a racing initializer) must take the slow path.
    __ lbu(t0, Address(op->klass()->as_register(),
                       InstanceKlass::init_state_offset()));
    // Acquire-like ordering: the init-state load must complete before later
    // memory accesses that depend on the class being initialized.
    __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
    __ mv(t1, (u1)InstanceKlass::fully_initialized);
    // Record debug info so an implicit exception at this PC maps back to
    // the allocating bytecode.
    add_debug_info_for_null_check_here(op->stub()->info());
    __ bne(t0, t1, *op->stub()->entry(), /* is_far */ true);
  }

  // Inline TLAB/eden allocation; branches to the stub entry on failure.
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());

  // Slow path rejoins here.
  __ bind(*op->stub()->continuation());
}
 996 
// Emit array allocation. Uses the inline fast path unless flags force the
// slow path for this array kind.
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();

  if (UseSlowPath ||
      (!UseFastNewObjectArray && is_reference_type(op->type())) ||
      (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
    // Always allocate through the runtime stub.
    __ j(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    // Keep the temps passed to allocate_array distinct from 'len': a temp
    // that aliases 'len' is replaced by tmp3; otherwise 'len' is copied into
    // tmp3 (presumably so a spare copy survives for the slow path -- confirm
    // against the stub's expectations).
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mv(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::base_offset_in_bytes(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry(),
                      op->zero_array());
  }
  // Slow path rejoins here.
  __ bind(*op->stub()->continuation());
}
1029 
1030 void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md,
1031                                         ciProfileData *data, Register recv) {
1032   int mdp_offset = md->byte_offset_of_slot(data, in_ByteSize(0));
1033   __ profile_receiver_type(recv, mdo, mdp_offset);
1034 }
1035 
1036 void LIR_Assembler::data_check(LIR_OpTypeCheck *op, ciMethodData **md, ciProfileData **data) {
1037   ciMethod* method = op->profiled_method();
1038   assert(method != nullptr, "Should have method");
1039   int bci = op->profiled_bci();
1040   *md = method->method_data_or_null();
1041   guarantee(*md != nullptr, "Sanity");
1042   *data = ((*md)->bci_to_data(bci));
1043   assert(*data != nullptr, "need data for type check");
1044   assert((*data)->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1045 }
1046 
// Full subtype check of obj's klass against 'k'. Branches to failure_target
// or success_target; may also fall through on success.
void LIR_Assembler::typecheck_helper_slowcheck(ciKlass *k, Register obj, Register Rtmp1,
                                               Register k_RInfo, Register klass_RInfo,
                                               Label *failure_target, Label *success_target) {
  // get object class
  // not a safepoint as obj null check happens earlier
  __ load_klass(klass_RInfo, obj);
  if (k->is_loaded()) {
    // See if we get an immediate positive hit
    __ ld(t0, Address(klass_RInfo, int64_t(k->super_check_offset())));
    if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
      // Primary supertype slot: a mismatch is definitive.
      __ bne(k_RInfo, t0, *failure_target, /* is_far */ true);
      // successful cast, fall through to profile or jump
    } else {
      // See if we get an immediate positive hit
      __ beq(k_RInfo, t0, *success_target);
      // check for self
      __ beq(klass_RInfo, k_RInfo, *success_target);

      // Secondary supers: call the runtime stub, arguments on the stack.
      __ subi(sp, sp, 2 * wordSize); // 2: store k_RInfo and klass_RInfo
      __ sd(k_RInfo, Address(sp, 0));             // super klass
      __ sd(klass_RInfo, Address(sp, wordSize));  // sub klass
      __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
      // load result to k_RInfo
      __ ld(k_RInfo, Address(sp, 0));
      __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo
      // result is a boolean
      __ beqz(k_RInfo, *failure_target, /* is_far */ true);
      // successful cast, fall through to profile or jump
    }
  } else {
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
    // call out-of-line instance of __ check_klass_subtype_slow_path(...)
    __ subi(sp, sp, 2 * wordSize); // 2: store k_RInfo and klass_RInfo
    __ sd(klass_RInfo, Address(sp, wordSize));  // sub klass
    __ sd(k_RInfo, Address(sp, 0));             // super klass
    __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
    // load result to k_RInfo
    __ ld(k_RInfo, Address(sp, 0));
    __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo
    // result is a boolean
    __ beqz(k_RInfo, *failure_target, /* is_far */ true);
    // successful cast, fall through to profile or jump
  }
}
1092 
// Profile 'obj' for a type check. A null object sets the null_seen flag in
// the MDO and branches to obj_is_null; otherwise the object's klass is
// recorded in the receiver-type profile. Clobbers klass_RInfo (used as the
// MDO base) and k_RInfo (used for the receiver klass).
void LIR_Assembler::profile_object(ciMethodData* md, ciProfileData* data, Register obj,
                                   Register k_RInfo, Register klass_RInfo, Label* obj_is_null) {
  Register mdo = klass_RInfo;
  __ mov_metadata(mdo, md->constant_encoding());
  Label not_null;
  __ bnez(obj, not_null);
  // Object is null, update MDO and exit
  Address data_addr = __ form_address(t1, mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
  // Read-modify-write the flags byte to set null_seen.
  __ lbu(t0, data_addr);
  __ ori(t0, t0, BitData::null_seen_byte_constant());
  __ sb(t0, data_addr);
  __ j(*obj_is_null);
  __ bind(not_null);

  // Non-null: record the receiver's klass.
  Register recv = k_RInfo;
  __ load_klass(recv, obj);
  type_profile_helper(mdo, md, data, recv);
}
1111 
1112 void LIR_Assembler::typecheck_loaded(LIR_OpTypeCheck *op, ciKlass* k, Register k_RInfo) {
1113   if (!k->is_loaded()) {
1114     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1115   } else {
1116     __ mov_metadata(k_RInfo, k->constant_encoding());
1117   }
1118 }
1119 
// Shared type-check emission for checkcast/instanceof/store-check. Control
// leaves via 'success', 'failure' or 'obj_is_null'; the fall-through path
// always ends with a jump to 'success'.
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;

  // check if it needs to be profiled
  ciMethodData* md = nullptr;
  ciProfileData* data = nullptr;

  const bool should_profile = op->should_profile();
  if (should_profile) {
    data_check(op, &md, &data);
  }
  Label* success_target = success;
  Label* failure_target = failure;

  // Resolve aliasing between obj and the temps by redirecting a clashing
  // temp to dst, which can serve as scratch during the check.
  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  Rtmp1 = op->tmp3()->as_register();
  select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  // Null receiver: profile it (null_seen) or just branch out.
  if (should_profile) {
    profile_object(md, data, obj, k_RInfo, klass_RInfo, obj_is_null);
  } else {
    __ beqz(obj, *obj_is_null);
  }

  // Load the target klass (constant or patchable) into k_RInfo.
  typecheck_loaded(op, k, k_RInfo);
  __ verify_oop(obj);

  if (op->fast_check()) {
    // Fast check: exact klass comparison only.
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(t0, obj, t1);
    __ bne(t0, k_RInfo, *failure_target, /* is_far */ true);
    // successful cast, fall through to profile or jump
  } else {
    // Full subtype check, possibly calling the runtime.
    typecheck_helper_slowcheck(k, obj, Rtmp1, k_RInfo, klass_RInfo, failure_target, success_target);
  }

  __ j(*success);
}
1170 
1171 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
1172   const bool should_profile = op->should_profile();
1173 
1174   LIR_Code code = op->code();
1175   if (code == lir_store_check) {
1176     typecheck_lir_store(op, should_profile);
1177   } else if (code == lir_checkcast) {
1178     Register obj = op->object()->as_register();
1179     Register dst = op->result_opr()->as_register();
1180     Label success;
1181     emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
1182     __ bind(success);
1183     if (dst != obj) {
1184       __ mv(dst, obj);
1185     }
1186   } else if (code == lir_instanceof) {
1187     Register obj = op->object()->as_register();
1188     Register dst = op->result_opr()->as_register();
1189     Label success, failure, done;
1190     emit_typecheck_helper(op, &success, &failure, &failure);
1191     __ bind(failure);
1192     __ mv(dst, zr);
1193     __ j(done);
1194     __ bind(success);
1195     __ mv(dst, 1);
1196     __ bind(done);
1197   } else {
1198     ShouldNotReachHere();
1199   }
1200 }
1201 
// Emit a compare-and-swap on the word at 'addr'. If a result operand is
// present it receives t0, which the cas helpers leave as 0 on success.
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  Register addr;
  if (op->addr()->is_register()) {
    addr = as_reg(op->addr());
  } else {
    // Only a bare base register is supported (no displacement, no index).
    assert(op->addr()->is_address(), "what else?");
    LIR_Address* addr_ptr = op->addr()->as_address_ptr();
    assert(addr_ptr->disp() == 0, "need 0 disp");
    assert(addr_ptr->index() == LIR_Opr::illegalOpr(), "need 0 index");
    addr = as_reg(addr_ptr->base());
  }
  Register newval = as_reg(op->new_value());
  Register cmpval = as_reg(op->cmp_value());

  if (op->code() == lir_cas_obj) {
    if (UseCompressedOops) {
      Register tmp1 = op->tmp1()->as_register();
      assert(op->tmp1()->is_valid(), "must be");
      Register tmp2 = op->tmp2()->as_register();
      assert(op->tmp2()->is_valid(), "must be");

      // Compress both oops into the temps, then CAS the 32-bit narrow oops.
      __ encode_heap_oop(tmp1, cmpval);
      cmpval = tmp1;
      __ encode_heap_oop(tmp2, newval);
      newval = tmp2;
      caswu(addr, newval, cmpval);
    } else {
      // Uncompressed oops: full 64-bit CAS.
      casl(addr, newval, cmpval);
    }
  } else if (op->code() == lir_cas_int) {
    casw(addr, newval, cmpval);
  } else {
    // lir_cas_long: 64-bit CAS.
    casl(addr, newval, cmpval);
  }

  if (op->result_opr()->is_valid()) {
    assert(op->result_opr()->is_register(), "need a register");
    __ mv(as_reg(op->result_opr()), t0); // cas result in t0, and 0 for success
  }
}
1242 
1243 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
1244   switch (code) {
1245     case lir_abs:  __ fabs_d(dest->as_double_reg(), value->as_double_reg()); break;
1246     case lir_sqrt: __ fsqrt_d(dest->as_double_reg(), value->as_double_reg()); break;
1247     default:       ShouldNotReachHere();
1248   }
1249 }
1250 
1251 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1252   assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");
1253   Register Rleft = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
1254   if (dst->is_single_cpu()) {
1255     Register Rdst = dst->as_register();
1256     if (right->is_constant()) {
1257       int right_const = right->as_jint();
1258       if (Assembler::is_simm12(right_const)) {
1259         logic_op_imm(Rdst, Rleft, right_const, code);
1260         __ sext(Rdst, Rdst, 32);
1261      } else {
1262         __ mv(t0, right_const);
1263         logic_op_reg32(Rdst, Rleft, t0, code);
1264      }
1265     } else {
1266       Register Rright = right->is_single_cpu() ? right->as_register() : right->as_register_lo();
1267       logic_op_reg32(Rdst, Rleft, Rright, code);
1268     }
1269   } else {
1270     Register Rdst = dst->as_register_lo();
1271     if (right->is_constant()) {
1272       long right_const = right->as_jlong();
1273       if (Assembler::is_simm12(right_const)) {
1274         logic_op_imm(Rdst, Rleft, right_const, code);
1275       } else {
1276         __ mv(t0, right_const);
1277         logic_op_reg(Rdst, Rleft, t0, code);
1278       }
1279     } else {
1280       Register Rright = right->is_single_cpu() ? right->as_register() : right->as_register_lo();
1281       logic_op_reg(Rdst, Rleft, Rright, code);
1282     }
1283   }
1284 }
1285 
// Standalone compare ops are not expected on RISC-V: comparisons are folded
// into branches (emit_branch) or materialized via comp_fl2i, so this entry
// point must never be reached.
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr src, LIR_Opr result, LIR_Op2* op) {
  ShouldNotCallThis();
}
1289 
1290 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
1291   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
1292     bool is_unordered_less = (code == lir_ucmp_fd2i);
1293     if (left->is_single_fpu()) {
1294       __ float_cmp(true, is_unordered_less ? -1 : 1,
1295                    left->as_float_reg(), right->as_float_reg(), dst->as_register());
1296     } else if (left->is_double_fpu()) {
1297       __ float_cmp(false, is_unordered_less ? -1 : 1,
1298                    left->as_double_reg(), right->as_double_reg(), dst->as_register());
1299     } else {
1300       ShouldNotReachHere();
1301     }
1302   } else if (code == lir_cmp_l2i) {
1303     __ cmp_l2i(dst->as_register(), left->as_register_lo(), right->as_register_lo());
1304   } else {
1305     ShouldNotReachHere();
1306   }
1307 }
1308 
// Align the upcoming call site. The call kind in 'code' does not affect the
// required alignment on RISC-V.
void LIR_Assembler::align_call(LIR_Code code) {
  // With RVC a call instruction may get 2-byte aligned.
  // The address of the call instruction needs to be 4-byte aligned to
  // ensure that it does not span a cache line so that it can be patched.
  __ align(NativeInstruction::instruction_size);
}
1315 
// Emit a direct Java call through a relocated (patchable) target.
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  // Keep the whole call sequence uncompressed so its layout stays patchable.
  Assembler::IncompressibleScope scope(_masm);
  address call = __ reloc_call(Address(op->addr(), rtype));
  if (call == nullptr) {
    // Could not allocate the address stub in the code cache; abort compile.
    bailout("reloc call address stub overflow");
    return;
  }
  // Record debug info / oop map at the call's return address.
  add_call_info(code_offset(), op->info());
  // Post-call nop marker (see MacroAssembler::post_call_nop).
  __ post_call_nop();
}
1326 
// Emit an inline-cache call (virtual/interface dispatch).
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  // Keep the whole call sequence uncompressed so its layout stays patchable.
  Assembler::IncompressibleScope scope(_masm);
  address call = __ ic_call(op->addr());
  if (call == nullptr) {
    // Could not allocate the address stub in the code cache; abort compile.
    bailout("reloc call address stub overflow");
    return;
  }
  // Record debug info / oop map at the call's return address.
  add_call_info(code_offset(), op->info());
  // Post-call nop marker (see MacroAssembler::post_call_nop).
  __ post_call_nop();
}
1337 
// Emit the out-of-line stub used later to patch/resolve the static call
// whose instruction was just emitted at call_pc.
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  MacroAssembler::assert_alignment(call_pc);
  address stub = __ start_a_stub(call_stub_size());
  if (stub == nullptr) {
    // No room left in the code cache's stub section; abort compile.
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  // Tie the stub to the preceding call instruction via relocation info.
  __ relocate(static_stub_Relocation::spec(call_pc));
  __ emit_static_call_stub();

  // The size budget must also cover the trampoline that may follow.
  assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
         <= call_stub_size(), "stub too big");
  __ end_a_stub();
}
1356 
// Emit an explicit throw: exception oop fixed in x10, throwing pc computed
// into x13, then call the Runtime1 exception handler stub.
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == x10, "must match");
  assert(exceptionPC->as_register() == x13, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  StubId unwind_id;

  // get current pc information
  // pc is only needed if the method has an exception handler, the unwind code does not need it.
  if (compilation()->debug_info_recorder()->last_pc_offset() == __ offset()) {
    // As no instructions have been generated yet for this LIR node it's
    // possible that an oop map already exists for the current offset.
    // In that case insert an dummy NOP here to ensure all oop map PCs
    // are unique. See JDK-8237483.
    __ nop();
  }
  int pc_for_athrow_offset = __ offset();
  InternalAddress pc_for_athrow(__ pc());
  __ la(exceptionPC->as_register(), pc_for_athrow);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ verify_not_null_oop(x10);
  // search an exception handler (x10: exception oop, x13: throwing pc)
  // The nofpu stub is used when the method contains no FPU code.
  if (compilation()->has_fpu_code()) {
    unwind_id = StubId::c1_handle_exception_id;
  } else {
    unwind_id = StubId::c1_handle_exception_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
  // NOTE(review): trailing nop after the far_call -- presumably keeps the
  // return address inside this op's code range; confirm.
  __ nop();
}
1390 
// Unwind with a pending exception: the oop is expected in x10; jump to the
// shared unwind handler emitted elsewhere in this method.
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == x10, "must match");
  __ j(_unwind_handler_entry);
}
1395 
1396 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
1397   Register left_reg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
1398   Register dest_reg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
1399   Register count_reg = count->as_register();
1400   if (dest->is_single_cpu()) {
1401     assert (dest->type() == T_INT, "unexpected result type");
1402     assert (left->type() == T_INT, "unexpected left type");
1403     __ andi(t0, count_reg, 31); // should not shift more than 31 bits
1404     switch (code) {
1405       case lir_shl:  __ sllw(dest_reg, left_reg, t0); break;
1406       case lir_shr:  __ sraw(dest_reg, left_reg, t0); break;
1407       case lir_ushr: __ srlw(dest_reg, left_reg, t0); break;
1408       default: ShouldNotReachHere();
1409     }
1410   } else if (dest->is_double_cpu()) {
1411     __ andi(t0, count_reg, 63); // should not shift more than 63 bits
1412     switch (code) {
1413       case lir_shl:  __ sll(dest_reg, left_reg, t0); break;
1414       case lir_shr:  __ sra(dest_reg, left_reg, t0); break;
1415       case lir_ushr: __ srl(dest_reg, left_reg, t0); break;
1416       default: ShouldNotReachHere();
1417     }
1418   } else {
1419     ShouldNotReachHere();
1420   }
1421 }
1422 
1423 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
1424   Register left_reg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
1425   Register dest_reg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
1426   if (dest->is_single_cpu()) {
1427     assert (dest->type() == T_INT, "unexpected result type");
1428     assert (left->type() == T_INT, "unexpected left type");
1429     count &= 0x1f;
1430     if (count != 0) {
1431       switch (code) {
1432         case lir_shl:  __ slliw(dest_reg, left_reg, count); break;
1433         case lir_shr:  __ sraiw(dest_reg, left_reg, count); break;
1434         case lir_ushr: __ srliw(dest_reg, left_reg, count); break;
1435         default: ShouldNotReachHere();
1436       }
1437     } else {
1438       move_regs(left_reg, dest_reg);
1439     }
1440   } else if (dest->is_double_cpu()) {
1441     count &= 0x3f;
1442     if (count != 0) {
1443       switch (code) {
1444         case lir_shl:  __ slli(dest_reg, left_reg, count); break;
1445         case lir_shr:  __ srai(dest_reg, left_reg, count); break;
1446         case lir_ushr: __ srli(dest_reg, left_reg, count); break;
1447         default: ShouldNotReachHere();
1448       }
1449     } else {
1450       move_regs(left->as_register_lo(), dest->as_register_lo());
1451     }
1452   } else {
1453     ShouldNotReachHere();
1454   }
1455 }
1456 
// Emit monitorenter/monitorexit fast paths; the stub handles the slow case.
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  Register temp = op->scratch_opr()->as_register();
  if (op->code() == lir_lock) {
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, temp, *op->stub()->entry());
    if (op->info() != nullptr) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
  } else if (op->code() == lir_unlock) {
    __ unlock_object(hdr, obj, lock, temp, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  // The slow-path stub returns here.
  __ bind(*op->stub()->continuation());
}
1475 
// Load the klass pointer of op->obj() into the result register. Attached
// CodeEmitInfo marks this as an implicit null-check site.
void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
  Register obj = op->obj()->as_pointer_register();
  Register result = op->result_opr()->as_pointer_register();

  CodeEmitInfo* info = op->info();
  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }

  __ load_klass(result, obj);
}
1487 
// Profile a call site: bump the invocation counter and, for virtual or
// interface calls, record the receiver type in the MDO.
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci          = op->profiled_bci();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  guarantee(md != nullptr, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
  Register mdo  = op->mdo()->as_register();
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      for (uint i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          // A row already holds this klass: bump its count statically.
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ increment(data_addr, DataLayout::counter_increment);
          return;
        }
      }
      // Receiver type is not found in profile data.
      // Fall back to runtime helper to handle the rest at runtime.
      __ mov_metadata(recv, known_klass->constant_encoding());
    } else {
      // Receiver type is dynamic: load the receiver's klass for profiling.
      __ load_klass(recv, recv);
    }
    type_profile_helper(mdo, md, data, recv);
  } else {
    // Static call
    __ increment(counter_addr, DataLayout::counter_increment);
  }
}
1534 
// Compute the stack address of monitor slot 'monitor_no' into dst.
void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  __ la(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
}
1538 
// Intrinsic for CRC32.update: fold byte 'val' into 'crc', result in 'res'.
// 'res' temporarily holds the CRC lookup-table address.
void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(),  "crc must be register");
  assert(op->val()->is_single_cpu(),  "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);
  __ la(res, ExternalAddress(StubRoutines::crc_table_addr()));

  // CRC32 works on the bitwise complement of the running crc value.
  __ notr(crc, crc); // ~crc
  __ zext(crc, crc, 32);
  __ update_byte_crc32(crc, val, res);
  __ notr(res, crc); // ~crc
}
1555 
// Update a TypeEntries profile slot when the incoming type may conflict with
// what was recorded previously. On entry 'tmp' holds either the object (its
// klass is loaded here) or is scratch when exact_klass is statically known.
// Branches to 'next' when done; 'none' handles the first-time-set case.
void LIR_Assembler::check_conflict(ciKlass* exact_klass, intptr_t current_klass,
                                   Register tmp, Label &next, Label &none,
                                   Address mdo_addr) {
  if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
    if (exact_klass != nullptr) {
      __ mov_metadata(tmp, exact_klass->constant_encoding());
    } else {
      __ load_klass(tmp, tmp);
    }

    __ ld(t1, mdo_addr);
    // XOR leaves only the differing bits; mask down to the klass part.
    __ xorr(tmp, tmp, t1);
    __ andi(t0, tmp, TypeEntries::type_klass_mask);
    // klass seen before, nothing to do. The unknown bit may have been
    // set already but no need to check.
    __ beqz(t0, next);

    // already unknown. Nothing to do anymore.
    __ test_bit(t0, tmp, exact_log2(TypeEntries::type_unknown));
    __ bnez(t0, next);

    if (TypeEntries::is_type_none(current_klass)) {
      // Slot may still be empty (or only null_seen): go set the type.
      __ beqz(t1, none);
      __ mv(t0, (u1)TypeEntries::null_seen);
      __ beq(t0, t1, none);
      // There is a chance that the checks above
      // fail if another thread has just set the
      // profiling to this obj's klass
      __ membar(MacroAssembler::LoadLoad);
      __ xorr(tmp, tmp, t1); // get back original value before XOR
      // Re-read the slot and redo the comparison after the barrier.
      __ ld(t1, mdo_addr);
      __ xorr(tmp, tmp, t1);
      __ andi(t0, tmp, TypeEntries::type_klass_mask);
      __ beqz(t0, next);
    }
  } else {
    assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
           ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

    __ ld(tmp, mdo_addr);
    // already unknown. Nothing to do anymore.
    __ test_bit(t0, tmp, exact_log2(TypeEntries::type_unknown));
    __ bnez(t0, next);
  }

  // different than before. Cannot keep accurate profile.
  __ ld(t1, mdo_addr);
  __ ori(t1, t1, TypeEntries::type_unknown);
  __ sd(t1, mdo_addr);

  if (TypeEntries::is_type_none(current_klass)) {
    __ j(next);

    __ bind(none);
    // first time here. Set profile type.
    __ sd(tmp, mdo_addr);
#ifdef ASSERT
    __ andi(tmp, tmp, TypeEntries::type_mask);
    __ verify_klass_ptr(tmp);
#endif
  }
}
1618 
// Type profiling when only a single klass can legitimately appear at this
// point: record it on first observation, or — if the cell already disagrees —
// mark the cell type_unknown. Branches to 'next' when nothing more to do.
void LIR_Assembler::check_no_conflict(ciKlass* exact_klass, intptr_t current_klass, Register tmp,
                                      Address mdo_addr, Label &next) {
  // There's a single possible klass at this profile point
  assert(exact_klass != nullptr, "should be");
  if (TypeEntries::is_type_none(current_klass)) {
    __ mov_metadata(tmp, exact_klass->constant_encoding());
    __ ld(t1, mdo_addr);
    // Zero klass bits after the XOR mean the cell already holds this klass.
    __ xorr(tmp, tmp, t1);
    __ andi(t0, tmp, TypeEntries::type_klass_mask);
    __ beqz(t0, next);
#ifdef ASSERT
  {
    Label ok;
    // Debug check: the cell must be empty, null_seen only, or (set by a
    // racing thread) this very klass — anything else is a profiling bug.
    __ ld(t0, mdo_addr);
    __ beqz(t0, ok);
    __ mv(t1, (u1)TypeEntries::null_seen);
    __ beq(t0, t1, ok);
    // may have been set by another thread
    __ membar(MacroAssembler::LoadLoad);
    __ mov_metadata(t0, exact_klass->constant_encoding());
    __ ld(t1, mdo_addr);
    __ xorr(t1, t0, t1);
    __ andi(t1, t1, TypeEntries::type_mask);
    __ beqz(t1, ok);

    __ stop("unexpected profiling mismatch");
    __ bind(ok);
  }
#endif
    // first time here. Set profile type.
    __ sd(tmp, mdo_addr);
#ifdef ASSERT
    __ andi(tmp, tmp, TypeEntries::type_mask);
    __ verify_klass_ptr(tmp);
#endif
  } else {
    assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
           ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

    __ ld(tmp, mdo_addr);
    // already unknown. Nothing to do anymore.
    __ test_bit(t0, tmp, exact_log2(TypeEntries::type_unknown));
    __ bnez(t0, next);

    // Profile disagrees with the expected klass: poison with type_unknown.
    __ ori(tmp, tmp, TypeEntries::type_unknown);
    __ sd(tmp, mdo_addr);
  }
}
1667 
// Profile a null observed at this site: branch to 'update' when 'tmp' (the
// object) is non-null; otherwise set the null_seen bit in the MDO cell
// (unless the compiler already knows null was seen) and, if a type-update
// sequence follows, jump over it to 'next'.
void LIR_Assembler::check_null(Register tmp, Label &update, intptr_t current_klass,
                               Address mdo_addr, bool do_update, Label &next) {
  __ bnez(tmp, update);
  if (!TypeEntries::was_null_seen(current_klass)) {
    // Plain read-modify-write; concurrent updates only ever OR in bits, so a
    // lost update here is benign profiling noise.
    __ ld(t1, mdo_addr);
    __ ori(t1, t1, TypeEntries::null_seen);
    __ sd(t1, mdo_addr);
  }
  if (do_update) {
    // A null contributes no type information; skip the type-update code.
    __ j(next);
  }
}
1680 
// Emit the speculative type-profiling sequence for op's object into the MDO
// slot op->mdp(): handle the null case, then dispatch to the conflict /
// no-conflict helpers depending on what the profile is known to hold.
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  COMMENT("emit_profile_type {");
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  // Decide which parts of the sequence are needed from what the compiler
  // has already proven about this profile entry.
  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
  assert_different_registers(tmp, t0, t1, mdo_addr.base());

  __ verify_oop(obj);

  if (tmp != obj) {
    __ mv(tmp, obj);
  }
  if (do_null) {
    check_null(tmp, update, current_klass, mdo_addr, do_update, next);
#ifdef ASSERT
  } else {
    // Object was proven non-null; verify that claim in debug builds.
    __ bnez(tmp, update);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != nullptr) {
      check_exact_klass(tmp, exact_klass);
    }
#endif
    if (!no_conflict) {
      check_conflict(exact_klass, current_klass, tmp, next, none, mdo_addr);
    } else {
      check_no_conflict(exact_klass, current_klass, tmp, mdo_addr, next);
    }

    __ bind(next);
  }
  COMMENT("} emit_profile_type");
}
1733 
// Valhalla inline-type profiling — not implemented on RISC-V.
void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
  Unimplemented();
}
1737 
1738 void LIR_Assembler::align_backward_branch_target() { }
1739 
1740 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
1741   // tmp must be unused
1742   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
1743 
1744   if (left->is_single_cpu()) {
1745     assert(dest->is_single_cpu(), "expect single result reg");
1746     __ negw(dest->as_register(), left->as_register());
1747   } else if (left->is_double_cpu()) {
1748     assert(dest->is_double_cpu(), "expect double result reg");
1749     __ neg(dest->as_register_lo(), left->as_register_lo());
1750   } else if (left->is_single_fpu()) {
1751     assert(dest->is_single_fpu(), "expect single float result reg");
1752     __ fneg_s(dest->as_float_reg(), left->as_float_reg());
1753   } else {
1754     assert(left->is_double_fpu(), "expect double float operand reg");
1755     assert(dest->is_double_fpu(), "expect double float result reg");
1756     __ fneg_d(dest->as_double_reg(), left->as_double_reg());
1757   }
1758 }
1759 
1760 
// Load effective address of 'addr' into 'dest'. Patched leal is not
// supported: deoptimize so the interpreter re-executes the bytecode.
void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  if (patch_code != lir_patch_none) {
    deoptimize_trap(info);
    return;
  }

  LIR_Address* adr = addr->as_address_ptr();
  Register dst = dest->as_pointer_register();

  assert_different_registers(dst, t0);
  if (adr->base()->is_valid() && dst == adr->base()->as_pointer_register() && (!adr->index()->is_cpu_register())) {
    // dst aliases the base register. When the (folded) displacement does not
    // fit a 12-bit immediate, compute the address into t0 first and then move
    // it, so the base is not clobbered while still needed — presumably
    // as_Address() materializes large offsets using the destination register.
    int scale = adr->scale();
    intptr_t offset = adr->disp();
    LIR_Opr index_op = adr->index();
    if (index_op->is_constant()) {
      // Fold a constant index (scaled) into the displacement.
      offset += ((intptr_t)index_op->as_constant_ptr()->as_jint()) << scale;
    }

    if (!Assembler::is_simm12(offset)) {
      __ la(t0, as_Address(adr));
      __ mv(dst, t0);
      return;
    }
  }

  __ la(dst, as_Address(adr));
}
1788 
1789 
// Call into the runtime at 'dest'. Arguments were set up by the LIR; only
// the call itself, optional debug info, and the post-call nop are emitted.
void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");

  // Emit only full-size instructions here so the alignment established by
  // align_call() is preserved through the call sequence.
  Assembler::IncompressibleScope scope(_masm);
  // Post call nops must be natural aligned due to cmodx rules.
  align_call(lir_rtcall);

  __ rt_call(dest);

  if (info != nullptr) {
    add_call_info_here(info);
  }
  __ post_call_nop();
}
1804 
1805 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
1806   if (dest->is_address() || src->is_address()) {
1807     move_op(src, dest, type, lir_patch_none, info, /* wide */ false);
1808   } else {
1809     ShouldNotReachHere();
1810   }
1811 }
1812 
1813 #ifdef ASSERT
1814 // emit run-time assertion
1815 void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
1816   assert(op->code() == lir_assert, "must be");
1817 
1818   Label ok;
1819   if (op->in_opr1()->is_valid()) {
1820     assert(op->in_opr2()->is_valid(), "both operands must be valid");
1821     bool is_unordered = false;
1822     LIR_Condition cond = op->condition();
1823     emit_branch(cond, op->in_opr1(), op->in_opr2(), ok, /* is_far */ false,
1824                 /* is_unordered */(cond == lir_cond_greaterEqual || cond == lir_cond_greater) ? false : true);
1825   } else {
1826     assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
1827     assert(op->condition() == lir_cond_always, "no other conditions allowed");
1828   }
1829 
1830   if (op->halt()) {
1831     const char* str = __ code_string(op->msg());
1832     __ stop(str);
1833   } else {
1834     breakpoint();
1835   }
1836   __ bind(ok);
1837 }
1838 #endif
1839 
1840 #ifndef PRODUCT
1841 #define COMMENT(x)   do { __ block_comment(x); } while (0)
1842 #else
1843 #define COMMENT(x)
1844 #endif
1845 
// Full two-way memory fence.
void LIR_Assembler::membar() {
  COMMENT("membar");
  __ membar(MacroAssembler::AnyAny);
}
1850 
// Acquire fence: earlier loads ordered before later loads and stores.
void LIR_Assembler::membar_acquire() {
  __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
}
1854 
// Release fence: earlier loads and stores ordered before later stores.
void LIR_Assembler::membar_release() {
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
}
1858 
// Load-load fence.
void LIR_Assembler::membar_loadload() {
  __ membar(MacroAssembler::LoadLoad);
}
1862 
// Store-store fence.
void LIR_Assembler::membar_storestore() {
  __ membar(MacroAssembler::StoreStore);
}
1866 
1867 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
1868 
1869 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
1870 
// Thread.onSpinWait() intrinsic: emit a spin-loop pause hint.
void LIR_Assembler::on_spin_wait() {
  __ pause();
}
1874 
// Copy the current thread pointer (kept in xthread) into the result register.
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  __ mv(result_reg->as_register(), xthread);
}
1878 
1879 void LIR_Assembler::peephole(LIR_List *lir) {}
1880 
// Emit an atomic read-modify-write on memory operand 'src':
// lir_xadd = fetch-and-add of 'data', lir_xchg = exchange with 'data'.
// The previous memory value ends up in 'dest'; 'tmp_op' holds the address.
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
  Address addr = as_Address(src->as_address_ptr());
  BasicType type = src->type();
  bool is_oop = is_reference_type(type);

  // Bind the type-appropriate xchg/add member-function pointers.
  get_op(type);

  switch (code) {
    case lir_xadd:
      {
        RegisterOrConstant inc;
        Register tmp = as_reg(tmp_op);
        Register dst = as_reg(dest);
        if (data->is_constant()) {
          inc = RegisterOrConstant(as_long(data));
          assert_different_registers(dst, addr.base(), tmp);
          assert_different_registers(tmp, t0);
        } else {
          inc = RegisterOrConstant(as_reg(data));
          assert_different_registers(inc.as_register(), dst, addr.base(), tmp);
        }
        // The atomic-add helper takes a plain address register.
        __ la(tmp, addr);
        (_masm->*add)(dst, inc, tmp);
        break;
      }
    case lir_xchg:
      {
        Register tmp = tmp_op->as_register();
        Register obj = as_reg(data);
        Register dst = as_reg(dest);
        if (is_oop && UseCompressedOops) {
          // Exchange the narrow oop; the old value is decoded below.
          __ encode_heap_oop(t0, obj);
          obj = t0;
        }
        assert_different_registers(obj, addr.base(), tmp);
        assert_different_registers(dst, addr.base(), tmp);
        __ la(tmp, addr);
        (_masm->*xchg)(dst, obj, tmp);
        if (is_oop && UseCompressedOops) {
          __ decode_heap_oop(dst);
        }
      }
      break;
    default:
      ShouldNotReachHere();
  }
  // Full two-way fence after the atomic operation.
  __ membar(MacroAssembler::AnyAny);
}
1929 
1930 int LIR_Assembler::array_element_size(BasicType type) const {
1931   int elem_size = type2aelembytes(type);
1932   return exact_log2(elem_size);
1933 }
1934 
1935 // helper functions which checks for overflow and sets bailout if it
1936 // occurs.  Always returns a valid embeddable pointer but in the
1937 // bailout case the pointer won't be to unique storage.
1938 address LIR_Assembler::float_constant(float f) {
1939   address const_addr = __ float_constant(f);
1940   if (const_addr == nullptr) {
1941     bailout("const section overflow");
1942     return __ code()->consts()->start();
1943   } else {
1944     return const_addr;
1945   }
1946 }
1947 
1948 address LIR_Assembler::double_constant(double d) {
1949   address const_addr = __ double_constant(d);
1950   if (const_addr == nullptr) {
1951     bailout("const section overflow");
1952     return __ code()->consts()->start();
1953   } else {
1954     return const_addr;
1955   }
1956 }
1957 
1958 address LIR_Assembler::int_constant(jlong n) {
1959   address const_addr = __ long_constant(n);
1960   if (const_addr == nullptr) {
1961     bailout("const section overflow");
1962     return __ code()->consts()->start();
1963   } else {
1964     return const_addr;
1965   }
1966 }
1967 
// 32-bit (signed) compare-and-swap at [addr]; the bool result from cmpxchg
// is inverted by seqz, so t0 == 1 signals the compare failed.
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::int32, Assembler::aq /* acquire */,
             Assembler::rl /* release */, t0, true /* result as bool */);
  __ seqz(t0, t0); // cmpxchg not equal, set t0 to 1
  __ membar(MacroAssembler::AnyAny);
}
1974 
// 32-bit unsigned compare-and-swap at [addr]; t0 == 1 signals the compare
// failed (see casw).
void LIR_Assembler::caswu(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::uint32, Assembler::aq /* acquire */,
             Assembler::rl /* release */, t0, true /* result as bool */);
  __ seqz(t0, t0); // cmpxchg not equal, set t0 to 1
  __ membar(MacroAssembler::AnyAny);
}
1981 
// 64-bit compare-and-swap at [addr]; t0 == 1 signals the compare failed
// (see casw).
void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::int64, Assembler::aq /* acquire */,
             Assembler::rl /* release */, t0, true /* result as bool */);
  __ seqz(t0, t0); // cmpxchg not equal, set t0 to 1
  __ membar(MacroAssembler::AnyAny);
}
1988 
// Call the Runtime1 patching stub matching this site's patching id and
// attach debug info — used where in-place patching is not supported and the
// stub resolves the constant (deoptimizing as needed) instead.
void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
  address target = nullptr;

  switch (patching_id(info)) {
    case PatchingStub::access_field_id:
      target = Runtime1::entry_for(StubId::c1_access_field_patching_id);
      break;
    case PatchingStub::load_klass_id:
      target = Runtime1::entry_for(StubId::c1_load_klass_patching_id);
      break;
    case PatchingStub::load_mirror_id:
      target = Runtime1::entry_for(StubId::c1_load_mirror_patching_id);
      break;
    case PatchingStub::load_appendix_id:
      target = Runtime1::entry_for(StubId::c1_load_appendix_patching_id);
      break;
    default: ShouldNotReachHere();
  }

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}
2011 
// Debug check: stop the VM if the object's actual klass differs from the
// compiler's expected exact klass. 'tmp' holds the object on entry and is
// clobbered (holds its klass afterwards).
void LIR_Assembler::check_exact_klass(Register tmp, ciKlass* exact_klass) {
  Label ok;
  __ load_klass(tmp, tmp);
  __ mov_metadata(t0, exact_klass->constant_encoding());
  __ beq(tmp, t0, ok);
  __ stop("exact klass and actual klass differ");
  __ bind(ok);
}
2020 
// Bind the 'xchg' and 'add' member-function pointers to the variants that
// match 'type': 32-bit for T_INT, 64-bit for T_LONG, and for references
// either the narrow-oop (compressed) or 64-bit forms.
void LIR_Assembler::get_op(BasicType type) {
  switch (type) {
    case T_INT:
      xchg = &MacroAssembler::atomic_xchgalw;
      add = &MacroAssembler::atomic_addalw;
      break;
    case T_LONG:
      xchg = &MacroAssembler::atomic_xchgal;
      add = &MacroAssembler::atomic_addal;
      break;
    case T_OBJECT:
    case T_ARRAY:
      if (UseCompressedOops) {
        xchg = &MacroAssembler::atomic_xchgalwu;
        add = &MacroAssembler::atomic_addalw;
      } else {
        xchg = &MacroAssembler::atomic_xchgal;
        add = &MacroAssembler::atomic_addal;
      }
      break;
    default:
      ShouldNotReachHere();
  }
}
2045 
2046 // emit_opTypeCheck sub functions
// Array store check: verify 'value' can be stored into 'array' (its klass is
// a subtype of the array's element klass), optionally updating the type
// profile. Null stores always succeed; type failure branches to op's stub.
void LIR_Assembler::typecheck_lir_store(LIR_OpTypeCheck* op, bool should_profile) {
  Register value = op->object()->as_register();
  Register array = op->array()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register Rtmp1 = op->tmp3()->as_register();

  CodeStub* stub = op->stub();

  // check if it needs to be profiled
  ciMethodData* md = nullptr;
  ciProfileData* data = nullptr;

  if (should_profile) {
    data_check(op, &md, &data);
  }
  Label  done;
  Label* success_target = &done;
  Label* failure_target = stub->entry();

  if (should_profile) {
    // Records the value's type in the profile and handles the null case.
    profile_object(md, data, value, k_RInfo, klass_RInfo, &done);
  } else {
    // Storing null needs no type check.
    __ beqz(value, done);
  }

  // The load_klass of 'array' below is the implicit null check site.
  add_debug_info_for_null_check_here(op->info_for_exception());
  __ load_klass(k_RInfo, array);
  __ load_klass(klass_RInfo, value);

  lir_store_slowcheck(k_RInfo, klass_RInfo, Rtmp1, success_target, failure_target);

  __ bind(done);
}
2081 
// Subtype-check tail for array stores: fast-path check of klass_RInfo
// against the array's element klass, then the out-of-line slow subtype stub
// with both klasses passed on the stack. Falls through on success.
void LIR_Assembler::lir_store_slowcheck(Register k_RInfo, Register klass_RInfo, Register Rtmp1,
                                        Label* success_target, Label* failure_target) {
  // get instance klass (it's already uncompressed)
  __ ld(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
  // perform the fast part of the checking logic
  __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
  // call out-of-line instance of __ check_klass_subtype_slow_path(...)
  __ subi(sp, sp, 2 * wordSize); // 2: store k_RInfo and klass_RInfo
  __ sd(klass_RInfo, Address(sp, wordSize));  // sub klass
  __ sd(k_RInfo, Address(sp, 0));             // super klass
  __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
  // load result to k_RInfo
  __ ld(k_RInfo, Address(sp, 0));
  __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo
  // result is a boolean
  __ beqz(k_RInfo, *failure_target, /* is_far */ true);
}
2099 
// Materialize a LIR constant into scratch t0 (the t0 long operand for
// T_LONG). Float/double constants are not supported by this helper.
void LIR_Assembler::const2reg_helper(LIR_Opr src) {
  switch (src->as_constant_ptr()->type()) {
    case T_INT:
    case T_ADDRESS:
    case T_OBJECT:
    case T_ARRAY:
    case T_METADATA:
        const2reg(src, FrameMap::t0_opr, lir_patch_none, nullptr);
        break;
    case T_LONG:
        const2reg(src, FrameMap::t0_long_opr, lir_patch_none, nullptr);
        break;
    case T_FLOAT:
    case T_DOUBLE:
    default:
      ShouldNotReachHere();
  }
}
2118 
2119 void LIR_Assembler::logic_op_reg32(Register dst, Register left, Register right, LIR_Code code) {
2120   switch (code) {
2121     case lir_logic_and: __ andrw(dst, left, right); break;
2122     case lir_logic_or:  __ orrw (dst, left, right); break;
2123     case lir_logic_xor: __ xorrw(dst, left, right); break;
2124     default:            ShouldNotReachHere();
2125   }
2126 }
2127 
2128 void LIR_Assembler::logic_op_reg(Register dst, Register left, Register right, LIR_Code code) {
2129   switch (code) {
2130     case lir_logic_and: __ andr(dst, left, right); break;
2131     case lir_logic_or:  __ orr (dst, left, right); break;
2132     case lir_logic_xor: __ xorr(dst, left, right); break;
2133     default:            ShouldNotReachHere();
2134   }
2135 }
2136 
2137 void LIR_Assembler::logic_op_imm(Register dst, Register left, int right, LIR_Code code) {
2138   switch (code) {
2139     case lir_logic_and: __ andi(dst, left, right); break;
2140     case lir_logic_or:  __ ori (dst, left, right); break;
2141     case lir_logic_xor: __ xori(dst, left, right); break;
2142     default:            ShouldNotReachHere();
2143   }
2144 }
2145 
// Spill register 'r' into the reserved outgoing-argument area at the given
// word offset from sp.
void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ sd(r, Address(sp, offset_from_rsp_in_bytes));
}
2152 
// Store immediate 'c' into the reserved outgoing-argument area at the given
// word offset from sp (materialized via scratch t0).
void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ mv(t0, c);
  __ sd(t0, Address(sp, offset_from_rsp_in_bytes));
}
2160 
2161 // Valhalla support
2162 
// Valhalla support — not implemented on RISC-V.
void LIR_Assembler::check_orig_pc() {
  Unimplemented();
}
2166 
// Valhalla support — not implemented on RISC-V.
int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
  Unimplemented();
  return 0;
}
2171 
// Valhalla support — not implemented on RISC-V.
void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
  Unimplemented();
}
2175 
// Valhalla support — not implemented on RISC-V.
void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
  Unimplemented();
}
2179 
// Valhalla support — not implemented on RISC-V.
void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
  Unimplemented();
}
2183 #undef __