/*
 * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "code/compiledIC.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_aarch64.inline.hpp"


#ifndef PRODUCT
#define COMMENT(x)   do { __ block_comment(x); } while (0)
#else
#define COMMENT(x)
#endif

NEEDS_CLEANUP // remove these definitions?
const Register IC_Klass    = rscratch2;   // where the IC klass is cached
const Register SYNC_header = r0;   // synchronization header
const Register SHIFT_count = r0;   // where count for shift operations must be

#define __ _masm->

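// If one of the caller's temp registers aliases the register that must be
// preserved, substitute `extra` for it so that, on return, the temps and the
// preserved register are all distinct.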
static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2);
}



static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2,
                                       Register &tmp3) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp2 = extra;
  } else if (tmp3 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp3 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2, tmp3);
}


bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}

//--------------fpu register translations-----------------------

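// On constant-section overflow we bail out of the compilation; returning the
// start of the const section keeps the (now abandoned) code emission from
// dereferencing a null address before the bailout is noticed.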
address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}


address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

address LIR_Assembler::int_constant(jlong n) {
  address const_addr = __ long_constant(n);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

void LIR_Assembler::breakpoint() { Unimplemented(); }

void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }

void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }

bool LIR_Assembler::is_literal_address(LIR_Address* addr) { Unimplemented(); return false; }
//-------------------------------------------

static Register as_reg(LIR_Opr op) {
  return op->is_double_cpu() ? op->as_register_lo() : op->as_register();
}

static jlong as_long(LIR_Opr data) {
  jlong result;
  switch (data->type()) {
  case T_INT:
    result = (data->as_jint());
    break;
  case T_LONG:
    result = (data->as_jlong());
    break;
  default:
    ShouldNotReachHere();
    result = 0;  // unreachable
  }
  return result;
}

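// Convert a LIR_Address to an AArch64 Address. With a register index the
// scaled register-offset form is used (sxtw for a 32-bit index, lsl for a
// 64-bit one); with an immediate displacement the address is legitimized
// into base + offset, materializing it in tmp if the offset is not encodable.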
Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  Register base = addr->base()->as_pointer_register();
  LIR_Opr opr = addr->index();
  if (opr->is_cpu_register()) {
    Register index;
    if (opr->is_single_cpu())
      index = opr->as_register();
    else
      index = opr->as_register_lo();
    assert(addr->disp() == 0, "must be");
    switch (opr->type()) {
      case T_INT:
        return Address(base, index, Address::sxtw(addr->scale()));
      case T_LONG:
        return Address(base, index, Address::lsl(addr->scale()));
      default:
        ShouldNotReachHere();
    }
  } else {
    assert(addr->scale() == 0,
           "expected for immediate operand, was: %d", addr->scale());
    ptrdiff_t offset = ptrdiff_t(addr->disp());
    // NOTE: Does not handle any 16 byte vector access.
    const uint type_size = type2aelembytes(addr->type(), true);
    return __ legitimize_address(Address(base, offset), type_size, tmp);
  }
  return Address();
}

Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  ShouldNotReachHere();
  return Address();
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}

Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr, rscratch1);  // Ouch
  // FIXME: This needs to be much more clever.  See x86.
}

// Ensure a valid Address (base + offset) to a stack slot. If the stack access
// is not encodable as base + (immediate) offset, generate an explicit address
// calculation and hold the address in a temporary register.
Address LIR_Assembler::stack_slot_address(int index, uint size, Register tmp, int adjust) {
  precond(size == 4 || size == 8);
  Address addr = frame_map()->address_for_slot(index, adjust);
  precond(addr.getMode() == Address::base_plus_offset);
  precond(addr.base() == sp);
  precond(addr.offset() > 0);
  uint mask = size - 1;
  assert((addr.offset() & mask) == 0, "scaled offsets only");
  return __ legitimize_address(addr, size, tmp);
}

void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  // r2: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame, so the first slot
  // in the local array is the last local from the interpreter and the
  // last slot is local[0] (the receiver) from the interpreter
  //
  // Similarly with locks: the first lock slot in the osr buffer is the nth
  // lock from the interpreter frame, and the nth lock slot in the osr buffer
  // is the 0th lock in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  //   r2: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord));
        __ cbnz(rscratch1, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif
      __ ldr(r19, Address(OSR_buf, slot_offset + 0));
      __ str(r19, frame_map()->address_for_monitor_lock(i));
      __ ldr(r19, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ str(r19, frame_map()->address_for_monitor_object(i));
    }
  }
}


// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
  Register receiver = FrameMap::receiver_opr->as_register();
  Register ic_klass = IC_Klass;
  int start_offset = __ offset();
  __ inline_cache_check(receiver, ic_klass);

  // if icache check fails, then jump to runtime routine
  // Note: RECEIVER must still contain the receiver!
  Label dont;
  __ br(Assembler::EQ, dont);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  // We align the verified entry point unless the method body
  // (including its inline cache check) will fit in a single 64-byte
  // icache line.
  if (! method()->is_accessor() || __ offset() - start_offset > 4 * 4) {
    // force alignment after the cache check.
    __ align(CodeEntryAlignment);
  }

  __ bind(dont);
  return start_offset;
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  Label L_skip_barrier;

  __ mov_metadata(rscratch2, method->holder()->constant_encoding());
  __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier /*L_fast_path*/);
  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
  __ bind(L_skip_barrier);
}

void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ mov(reg, zr);
  } else {
    __ movoop(reg, o, /*immediate*/true);
  }
}

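// Rather than patching code in place (as x86 c1 does), this port calls the
// matching Runtime1 patching entry, which resolves the constant and
// deoptimizes so execution resumes in the interpreter.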
void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;

  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
    reloc_type = relocInfo::section_word_type;
    break;
  case PatchingStub::load_klass_id:
    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
    reloc_type = relocInfo::metadata_type;
    break;
  case PatchingStub::load_mirror_id:
    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  case PatchingStub::load_appendix_id:
    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  default: ShouldNotReachHere();
  }

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  deoptimize_trap(info);
}


// This specifies the sp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!

  return in_bytes(frame_map()->framesize_in_bytes());
}


int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in r0 and r3
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(r0);

  // search an exception handler (r0: exception oop, r3: throwing pc)
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(r0);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(r19, r0);  // Preserve the exception
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::r0_opr);
    stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
    if (UseHeavyMonitors) {
      __ b(*stub->entry());
    } else {
      __ unlock_object(r5, r4, r0, *stub->entry());
    }
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ mov(c_rarg0, rthread);
    __ mov_metadata(c_rarg1, method()->constant_encoding());
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(r0, r19);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ block_comment("remove_frame and dispatch to the unwind handler");
  __ remove_frame(initial_frame_size_in_bytes());
  __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for deopt handler
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

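  // adr(lr, pc()) materializes the current pc as the return address,
  // presumably so the deopt blob can use lr to identify the deopt site.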
  __ adr(lr, pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
  _masm->code_section()->relocate(adr, relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}

void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  code_stub->set_safepoint_offset(__ offset());
  __ relocate(relocInfo::poll_return_type);
  __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
  __ ret(lr);
}

int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  guarantee(info != NULL, "Shouldn't be NULL");
  __ get_polling_page(rscratch1, relocInfo::poll_type);
  add_debug_info_for_branch(info);  // This isn't just debug info:
                                    // it's the oop map
  __ read_polling_page(rscratch1, relocInfo::poll_type);
  return __ offset();
}

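// Register 31 encodes either sp or zr depending on the instruction; FrameMap
// can hand us the GPR alias r31_sp, so translate it to the real sp operand
// before emitting the move.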
void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg == r31_sp)
    from_reg = sp;
  if (to_reg == r31_sp)
    to_reg = sp;
  __ mov(to_reg, from_reg);
}

void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movw(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
      break;
    }

    case T_OBJECT: {
      if (patch_code == lir_patch_none) {
        jobject2reg(c->as_jobject(), dest->as_register());
      } else {
        jobject2reg_with_patching(dest->as_register(), info);
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
        __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
      } else {
        __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
        __ ldrs(dest->as_float_reg(), Address(rscratch1));
      }
      break;
    }

    case T_DOUBLE: {
      if (__ operand_valid_for_float_immediate(c->as_jdouble())) {
        __ fmovd(dest->as_double_reg(), (c->as_jdouble()));
      } else {
        __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
        __ ldrd(dest->as_double_reg(), Address(rscratch1));
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
  case T_OBJECT:
    {
      if (! c->as_jobject())
        __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
      else {
        const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
        reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
      }
    }
    break;
  case T_ADDRESS:
    {
      const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
      reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
    }
    break;
  case T_INT:
  case T_FLOAT:
    {
      Register reg = zr;
      if (c->as_jint_bits() == 0)
        __ strw(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
      else {
        __ movw(rscratch1, c->as_jint_bits());
        __ strw(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix()));
      }
    }
    break;
  case T_LONG:
  case T_DOUBLE:
    {
      Register reg = zr;
      if (c->as_jlong_bits() == 0)
        __ str(zr, frame_map()->address_for_slot(dest->double_stack_ix(),
                                                 lo_word_offset_in_bytes));
      else {
        __ mov(rscratch1, (intptr_t)c->as_jlong_bits());
        __ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(),
                                                        lo_word_offset_in_bytes));
      }
    }
    break;
  default:
    ShouldNotReachHere();
  }
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* to_addr = dest->as_address_ptr();

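  // Every constant reaching here is zero (asserted below), so we only need
  // to select a store of the right width and store zr through it.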
  void (Assembler::* insn)(Register Rt, const Address &adr);

  switch (type) {
  case T_ADDRESS:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_LONG:
    assert(c->as_jlong() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_INT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strw;
    break;
  case T_OBJECT:
  case T_ARRAY:
    assert(c->as_jobject() == 0, "should be");
    if (UseCompressedOops && !wide) {
      insn = &Assembler::strw;
    } else {
      insn = &Assembler::str;
    }
    break;
  case T_CHAR:
  case T_SHORT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strh;
    break;
  case T_BOOLEAN:
  case T_BYTE:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strb;
    break;
  default:
    ShouldNotReachHere();
    insn = &Assembler::str;  // unreachable
  }

  if (info) add_debug_info_for_null_check_here(info);
  (_masm->*insn)(zr, as_Address(to_addr, rscratch1));
}

void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
    if (is_reference_type(src->type())) {
      // Surprisingly, we can see a move of a long to T_OBJECT
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);

  } else if (dest->is_single_fpu()) {
    __ fmovs(dest->as_float_reg(), src->as_float_reg());

  } else if (dest->is_double_fpu()) {
    __ fmovd(dest->as_double_reg(), src->as_double_reg());

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  precond(src->is_register() && dest->is_stack());

  uint const c_sz32 = sizeof(uint32_t);
  uint const c_sz64 = sizeof(uint64_t);

  if (src->is_single_cpu()) {
    int index = dest->single_stack_ix();
    if (is_reference_type(type)) {
      __ str(src->as_register(), stack_slot_address(index, c_sz64, rscratch1));
      __ verify_oop(src->as_register());
    } else if (type == T_METADATA || type == T_DOUBLE || type == T_ADDRESS) {
      __ str(src->as_register(), stack_slot_address(index, c_sz64, rscratch1));
    } else {
      __ strw(src->as_register(), stack_slot_address(index, c_sz32, rscratch1));
    }

  } else if (src->is_double_cpu()) {
    int index = dest->double_stack_ix();
    Address dest_addr_LO = stack_slot_address(index, c_sz64, rscratch1, lo_word_offset_in_bytes);
    __ str(src->as_register_lo(), dest_addr_LO);

  } else if (src->is_single_fpu()) {
    int index = dest->single_stack_ix();
    __ strs(src->as_float_reg(), stack_slot_address(index, c_sz32, rscratch1));

  } else if (src->is_double_fpu()) {
    int index = dest->double_stack_ix();
    __ strd(src->as_double_reg(), stack_slot_address(index, c_sz64, rscratch1));

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = NULL;
  Register compressed_src = rscratch1;

  if (patch_code != lir_patch_none) {
    deoptimize_trap(info);
    return;
  }

  if (is_reference_type(type)) {
    __ verify_oop(src->as_register());

    if (UseCompressedOops && !wide) {
      __ encode_heap_oop(compressed_src, src->as_register());
    } else {
      compressed_src = src->as_register();
    }
  }

  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      __ strs(src->as_float_reg(), as_Address(to_addr));
      break;
    }

    case T_DOUBLE: {
      __ strd(src->as_double_reg(), as_Address(to_addr));
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ strw(compressed_src, as_Address(to_addr, rscratch2));
      } else {
        __ str(compressed_src, as_Address(to_addr));
      }
      break;
    case T_METADATA:
      // We get here to store a method pointer to the stack to pass to
      // a dtrace runtime call. This can't work on 64 bit with
      // compressed klass ptrs: T_METADATA can be a compressed klass
      // ptr or a 64 bit method pointer.
      ShouldNotReachHere();
      __ str(src->as_register(), as_Address(to_addr));
      break;
    case T_ADDRESS:
      __ str(src->as_register(), as_Address(to_addr));
      break;
    case T_INT:
      __ strw(src->as_register(), as_Address(to_addr));
      break;

    case T_LONG: {
      __ str(src->as_register_lo(), as_Address_lo(to_addr));
      break;
    }

    case T_BYTE:    // fall through
    case T_BOOLEAN: {
      __ strb(src->as_register(), as_Address(to_addr));
      break;
    }

    case T_CHAR:    // fall through
    case T_SHORT:
      __ strh(src->as_register(), as_Address(to_addr));
      break;

    default:
      ShouldNotReachHere();
  }
  if (info != NULL) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  precond(src->is_stack() && dest->is_register());

  uint const c_sz32 = sizeof(uint32_t);
  uint const c_sz64 = sizeof(uint64_t);

  if (dest->is_single_cpu()) {
    int index = src->single_stack_ix();
    if (is_reference_type(type)) {
      __ ldr(dest->as_register(), stack_slot_address(index, c_sz64, rscratch1));
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ ldr(dest->as_register(), stack_slot_address(index, c_sz64, rscratch1));
    } else {
      __ ldrw(dest->as_register(), stack_slot_address(index, c_sz32, rscratch1));
    }

  } else if (dest->is_double_cpu()) {
    int index = src->double_stack_ix();
    Address src_addr_LO = stack_slot_address(index, c_sz64, rscratch1, lo_word_offset_in_bytes);
    __ ldr(dest->as_register_lo(), src_addr_LO);

  } else if (dest->is_single_fpu()) {
    int index = src->single_stack_ix();
    __ ldrs(dest->as_float_reg(), stack_slot_address(index, c_sz32, rscratch1));

  } else if (dest->is_double_fpu()) {
    int index = src->double_stack_ix();
    __ ldrd(dest->as_double_reg(), stack_slot_address(index, c_sz64, rscratch1));

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;

  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
    reloc_type = relocInfo::section_word_type;
    break;
  case PatchingStub::load_klass_id:
    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
    reloc_type = relocInfo::metadata_type;
    break;
  case PatchingStub::load_mirror_id:
    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  case PatchingStub::load_appendix_id:
    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  default: ShouldNotReachHere();
  }

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}

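// Stack-to-stack moves go through rscratch1; a 64-bit temp operand is used
// for LONG/DOUBLE so the full word is copied.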
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {

  LIR_Opr temp;
  if (type == T_LONG || type == T_DOUBLE)
    temp = FrameMap::rscratch1_long_opr;
  else
    temp = FrameMap::rscratch1_opr;

  stack2reg(src, temp, src->type());
  reg2stack(temp, dest, dest->type(), false);
}


void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
  LIR_Address* addr = src->as_address_ptr();
  LIR_Address* from_addr = src->as_address_ptr();

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  if (patch_code != lir_patch_none) {
    deoptimize_trap(info);
    return;
  }

  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }
  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      __ ldrs(dest->as_float_reg(), as_Address(from_addr));
      break;
    }

    case T_DOUBLE: {
      __ ldrd(dest->as_double_reg(), as_Address(from_addr));
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ ldrw(dest->as_register(), as_Address(from_addr));
      } else {
        __ ldr(dest->as_register(), as_Address(from_addr));
      }
      break;
    case T_METADATA:
      // We get here to load a method pointer from the stack for a dtrace
      // runtime call. This can't work on 64 bit with compressed klass ptrs:
      // T_METADATA can be a compressed klass ptr or a 64 bit method pointer.
      ShouldNotReachHere();
      __ ldr(dest->as_register(), as_Address(from_addr));
      break;
    case T_ADDRESS:
      __ ldr(dest->as_register(), as_Address(from_addr));
      break;
    case T_INT:
      __ ldrw(dest->as_register(), as_Address(from_addr));
      break;

    case T_LONG: {
      __ ldr(dest->as_register_lo(), as_Address_lo(from_addr));
      break;
    }

    case T_BYTE:
      __ ldrsb(dest->as_register(), as_Address(from_addr));
      break;
    case T_BOOLEAN: {
      __ ldrb(dest->as_register(), as_Address(from_addr));
      break;
    }

    case T_CHAR:
      __ ldrh(dest->as_register(), as_Address(from_addr));
      break;
    case T_SHORT:
      __ ldrsh(dest->as_register(), as_Address(from_addr));
      break;

    default:
      ShouldNotReachHere();
  }

  if (is_reference_type(type)) {
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }

    if (!UseZGC) {
      // Load barrier has not yet been applied, so ZGC can't verify the oop here
      __ verify_oop(dest->as_register());
    }
  }
}


int LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  return exact_log2(elem_size);
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
  case lir_idiv:
  case lir_irem:
    arithmetic_idiv(op->code(),
                    op->in_opr1(),
                    op->in_opr2(),
                    op->in_opr3(),
                    op->result_opr(),
                    op->info());
    break;
  case lir_fmad:
    __ fmaddd(op->result_opr()->as_double_reg(),
              op->in_opr1()->as_double_reg(),
              op->in_opr2()->as_double_reg(),
              op->in_opr3()->as_double_reg());
    break;
  case lir_fmaf:
    __ fmadds(op->result_opr()->as_float_reg(),
              op->in_opr1()->as_float_reg(),
              op->in_opr2()->as_float_reg(),
              op->in_opr3()->as_float_reg());
    break;
  default:      ShouldNotReachHere(); break;
  }
}

void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != NULL) add_debug_info_for_branch(op->info());
    __ b(*(op->label()));
  } else {
    Assembler::Condition acond;
    if (op->code() == lir_cond_float_branch) {
      bool is_unordered = (op->ublock() == op->block());
      // Assembler::EQ does not permit unordered branches, so we add
      // another branch here.  Likewise, Assembler::NE does not permit
      // ordered branches.
      if ((is_unordered && op->cond() == lir_cond_equal)
          || (!is_unordered && op->cond() == lir_cond_notEqual))
        __ br(Assembler::VS, *(op->ublock()->label()));
      switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::EQ; break;
      case lir_cond_notEqual:     acond = Assembler::NE; break;
      case lir_cond_less:         acond = (is_unordered ? Assembler::LT : Assembler::LO); break;
      case lir_cond_lessEqual:    acond = (is_unordered ? Assembler::LE : Assembler::LS); break;
      case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break;
      case lir_cond_greater:      acond = (is_unordered ? Assembler::HI : Assembler::GT); break;
      default:                    ShouldNotReachHere();
        acond = Assembler::EQ;  // unreachable
      }
    } else {
      switch (op->cond()) {
        case lir_cond_equal:        acond = Assembler::EQ; break;
        case lir_cond_notEqual:     acond = Assembler::NE; break;
        case lir_cond_less:         acond = Assembler::LT; break;
        case lir_cond_lessEqual:    acond = Assembler::LE; break;
        case lir_cond_greaterEqual: acond = Assembler::GE; break;
        case lir_cond_greater:      acond = Assembler::GT; break;
        case lir_cond_belowEqual:   acond = Assembler::LS; break;
        case lir_cond_aboveEqual:   acond = Assembler::HS; break;
        default:                    ShouldNotReachHere();
          acond = Assembler::EQ;  // unreachable
      }
    }
    __ br(acond, *(op->label()));
  }
}



void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2f:
      {
        __ scvtfws(dest->as_float_reg(), src->as_register());
        break;
      }
    case Bytecodes::_i2d:
      {
        __ scvtfwd(dest->as_double_reg(), src->as_register());
        break;
      }
    case Bytecodes::_l2d:
      {
        __ scvtfd(dest->as_double_reg(), src->as_register_lo());
        break;
      }
    case Bytecodes::_l2f:
      {
        __ scvtfs(dest->as_float_reg(), src->as_register_lo());
        break;
      }
    case Bytecodes::_f2d:
      {
        __ fcvts(dest->as_double_reg(), src->as_float_reg());
        break;
      }
    case Bytecodes::_d2f:
      {
        __ fcvtd(dest->as_float_reg(), src->as_double_reg());
        break;
      }
    case Bytecodes::_i2c:
      {
        __ ubfx(dest->as_register(), src->as_register(), 0, 16);
        break;
      }
    case Bytecodes::_i2l:
      {
        __ sxtw(dest->as_register_lo(), src->as_register());
        break;
      }
    case Bytecodes::_i2s:
      {
        __ sxth(dest->as_register(), src->as_register());
        break;
      }
    case Bytecodes::_i2b:
      {
        __ sxtb(dest->as_register(), src->as_register());
        break;
      }
    case Bytecodes::_l2i:
      {
        _masm->block_comment("FIXME: This could be a no-op");
        __ uxtw(dest->as_register(), src->as_register_lo());
        break;
      }
    case Bytecodes::_d2l:
      {
        __ fcvtzd(dest->as_register_lo(), src->as_double_reg());
        break;
      }
    case Bytecodes::_f2i:
      {
        __ fcvtzsw(dest->as_register(), src->as_float_reg());
        break;
      }
    case Bytecodes::_f2l:
      {
        __ fcvtzs(dest->as_register_lo(), src->as_float_reg());
        break;
      }
    case Bytecodes::_d2i:
      {
        __ fcvtzdw(dest->as_register(), src->as_double_reg());
        break;
      }
    default: ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    __ ldrb(rscratch1, Address(op->klass()->as_register(),
                               InstanceKlass::init_state_offset()));
    __ cmpw(rscratch1, InstanceKlass::fully_initialized);
    add_debug_info_for_null_check_here(op->stub()->info());
    __ br(Assembler::NE, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();
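  // The incoming length is a 32-bit value; clear the upper word before it is
  // used in the allocation below.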
  __ uxtw(len, len);

  if (UseSlowPath ||
      (!UseFastNewObjectArray && is_reference_type(op->type())) ||
      (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
    __ b(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::header_size(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

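// Update the receiver-type profile rows at `data`: if `recv` matches a row,
// bump that row's counter; otherwise claim the first empty row for it. If all
// rows hold other types, fall through without updating.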
void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ ldr(rscratch1, Address(rscratch2));
    __ cmp(recv, rscratch1);
    __ br(Assembler::NE, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ b(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    __ lea(rscratch2,
           Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    Address recv_addr(rscratch2);
    __ ldr(rscratch1, recv_addr);
    __ cbnz(rscratch1, next_test);
    __ str(recv, recv_addr);
    __ mov(rscratch1, DataLayout::counter_increment);
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
    __ str(rscratch1, Address(rscratch2));
    __ b(*update_done);
    __ bind(next_test);
  }
}

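// Shared subtype check for checkcast/instanceof: try the fast paths (direct
// klass compare, or a hit at the klass's super_check_offset) and fall back to
// the Runtime1::slow_subtype_check_id stub; with profiling enabled the
// success/failure paths update the receiver-type data first.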
1287 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
1288   // we always need a stub for the failure case.
1289   CodeStub* stub = op->stub();
1290   Register obj = op->object()->as_register();
1291   Register k_RInfo = op->tmp1()->as_register();
1292   Register klass_RInfo = op->tmp2()->as_register();
1293   Register dst = op->result_opr()->as_register();
1294   ciKlass* k = op->klass();
1295   Register Rtmp1 = noreg;
1296 
1297   // check if it needs to be profiled
1298   ciMethodData* md;
1299   ciProfileData* data;
1300 
1301   const bool should_profile = op->should_profile();
1302 
1303   if (should_profile) {
1304     ciMethod* method = op->profiled_method();
1305     assert(method != NULL, "Should have method");
1306     int bci = op->profiled_bci();
1307     md = method->method_data_or_null();
1308     assert(md != NULL, "Sanity");
1309     data = md->bci_to_data(bci);
1310     assert(data != NULL,                "need data for type check");
1311     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1312   }
1313   Label profile_cast_success, profile_cast_failure;
1314   Label *success_target = should_profile ? &profile_cast_success : success;
1315   Label *failure_target = should_profile ? &profile_cast_failure : failure;
1316 
1317   if (obj == k_RInfo) {
1318     k_RInfo = dst;
1319   } else if (obj == klass_RInfo) {
1320     klass_RInfo = dst;
1321   }
1322   if (k->is_loaded() && !UseCompressedClassPointers) {
1323     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1324   } else {
1325     Rtmp1 = op->tmp3()->as_register();
1326     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1327   }
1328 
1329   assert_different_registers(obj, k_RInfo, klass_RInfo);
1330 
1331     if (should_profile) {
1332       Label not_null;
1333       __ cbnz(obj, not_null);
1334       // Object is null; update MDO and exit
1335       Register mdo  = klass_RInfo;
1336       __ mov_metadata(mdo, md->constant_encoding());
1337       Address data_addr
1338         = __ form_address(rscratch2, mdo,
1339                           md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1340                           0);
1341       __ ldrb(rscratch1, data_addr);
1342       __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1343       __ strb(rscratch1, data_addr);
1344       __ b(*obj_is_null);
1345       __ bind(not_null);
1346     } else {
1347       __ cbz(obj, *obj_is_null);
1348     }
1349 
1350   if (!k->is_loaded()) {
1351     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1352   } else {
1353     __ mov_metadata(k_RInfo, k->constant_encoding());
1354   }
1355   __ verify_oop(obj);
1356 
1357   if (op->fast_check()) {
1358     // get object class
1359     // not a safepoint as obj null check happens earlier
1360     __ load_klass(rscratch1, obj);
1361     __ cmp( rscratch1, k_RInfo);
1362 
1363     __ br(Assembler::NE, *failure_target);
1364     // successful cast, fall through to profile or jump
1365   } else {
1366     // get object class
1367     // not a safepoint as obj null check happens earlier
1368     __ load_klass(klass_RInfo, obj);
1369     if (k->is_loaded()) {
1370       // See if we get an immediate positive hit
1371       __ ldr(rscratch1, Address(klass_RInfo, int64_t(k->super_check_offset())));
1372       __ cmp(k_RInfo, rscratch1);
1373       if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1374         __ br(Assembler::NE, *failure_target);
1375         // successful cast, fall through to profile or jump
1376       } else {
1377         // See if we get an immediate positive hit
1378         __ br(Assembler::EQ, *success_target);
1379         // check for self
1380         __ cmp(klass_RInfo, k_RInfo);
1381         __ br(Assembler::EQ, *success_target);
1382 
1383         __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1384         __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1385         __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1386         // result is a boolean
1387         __ cbzw(klass_RInfo, *failure_target);
1388         // successful cast, fall through to profile or jump
1389       }
1390     } else {
1391       // perform the fast part of the checking logic
1392       __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1393       // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1394       __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1395       __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1396       __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1397       // result is a boolean
1398       __ cbz(k_RInfo, *failure_target);
1399       // successful cast, fall through to profile or jump
1400     }
1401   }
1402   if (should_profile) {
1403     Register mdo  = klass_RInfo, recv = k_RInfo;
1404     __ bind(profile_cast_success);
1405     __ mov_metadata(mdo, md->constant_encoding());
1406     __ load_klass(recv, obj);
1407     Label update_done;
1408     type_profile_helper(mdo, md, data, recv, success);
1409     __ b(*success);
1410 
1411     __ bind(profile_cast_failure);
1412     __ mov_metadata(mdo, md->constant_encoding());
1413     Address counter_addr
1414       = __ form_address(rscratch2, mdo,
1415                         md->byte_offset_of_slot(data, CounterData::count_offset()),
1416                         0);
1417     __ ldr(rscratch1, counter_addr);
1418     __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
1419     __ str(rscratch1, counter_addr);
1420     __ b(*failure);
1421   }
1422   __ b(*success);
1423 }
1424 
1425 
1426 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
1427   const bool should_profile = op->should_profile();
1428 
1429   LIR_Code code = op->code();
1430   if (code == lir_store_check) {
1431     Register value = op->object()->as_register();
1432     Register array = op->array()->as_register();
1433     Register k_RInfo = op->tmp1()->as_register();
1434     Register klass_RInfo = op->tmp2()->as_register();
1435     Register Rtmp1 = op->tmp3()->as_register();
1436 
1437     CodeStub* stub = op->stub();
1438 
1439     // check if it needs to be profiled
1440     ciMethodData* md;
1441     ciProfileData* data;
1442 
1443     if (should_profile) {
1444       ciMethod* method = op->profiled_method();
1445       assert(method != NULL, "Should have method");
1446       int bci = op->profiled_bci();
1447       md = method->method_data_or_null();
1448       assert(md != NULL, "Sanity");
1449       data = md->bci_to_data(bci);
1450       assert(data != NULL,                "need data for type check");
1451       assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1452     }
1453     Label profile_cast_success, profile_cast_failure, done;
1454     Label *success_target = should_profile ? &profile_cast_success : &done;
1455     Label *failure_target = should_profile ? &profile_cast_failure : stub->entry();
1456 
1457     if (should_profile) {
1458       Label not_null;
1459       __ cbnz(value, not_null);
1460       // Object is null; update MDO and exit
1461       Register mdo  = klass_RInfo;
1462       __ mov_metadata(mdo, md->constant_encoding());
1463       Address data_addr
1464         = __ form_address(rscratch2, mdo,
1465                           md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1466                           0);
1467       __ ldrb(rscratch1, data_addr);
1468       __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1469       __ strb(rscratch1, data_addr);
1470       __ b(done);
1471       __ bind(not_null);
1472     } else {
1473       __ cbz(value, done);
1474     }
1475 
1476     add_debug_info_for_null_check_here(op->info_for_exception());
1477     __ load_klass(k_RInfo, array);
1478     __ load_klass(klass_RInfo, value);
1479 
1480     // get instance klass (it's already uncompressed)
1481     __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
1482     // perform the fast part of the checking logic
1483     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1484     // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1485     __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1486     __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1487     __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1488     // result is a boolean
1489     __ cbzw(k_RInfo, *failure_target);
1490     // fall through to the success case
1491 
1492     if (should_profile) {
1493       Register mdo  = klass_RInfo, recv = k_RInfo;
1494       __ bind(profile_cast_success);
1495       __ mov_metadata(mdo, md->constant_encoding());
1496       __ load_klass(recv, value);
1497       Label update_done;
1498       type_profile_helper(mdo, md, data, recv, &done);
1499       __ b(done);
1500 
1501       __ bind(profile_cast_failure);
1502       __ mov_metadata(mdo, md->constant_encoding());
1503       Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1504       __ lea(rscratch2, counter_addr);
1505       __ ldr(rscratch1, Address(rscratch2));
1506       __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
1507       __ str(rscratch1, Address(rscratch2));
1508       __ b(*stub->entry());
1509     }
1510 
1511     __ bind(done);
1512   } else if (code == lir_checkcast) {
1513     Register obj = op->object()->as_register();
1514     Register dst = op->result_opr()->as_register();
1515     Label success;
1516     emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
1517     __ bind(success);
1518     if (dst != obj) {
1519       __ mov(dst, obj);
1520     }
1521   } else if (code == lir_instanceof) {
1522     Register obj = op->object()->as_register();
1523     Register dst = op->result_opr()->as_register();
1524     Label success, failure, done;
1525     emit_typecheck_helper(op, &success, &failure, &failure);
1526     __ bind(failure);
1527     __ mov(dst, zr);
1528     __ b(done);
1529     __ bind(success);
1530     __ mov(dst, 1);
1531     __ bind(done);
1532   } else {
1533     ShouldNotReachHere();
1534   }
1535 }
1536 
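// casw/casl emit a full compare-and-swap on a word / double word.
// On exit rscratch1 holds the inverted success flag (0 if the swap
// succeeded), and the trailing AnyAny barrier conservatively gives the
// operation sequentially consistent semantics.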
1537 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1538   __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1539   __ cset(rscratch1, Assembler::NE);
1540   __ membar(__ AnyAny);
1541 }
1542 
1543 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1544   __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1545   __ cset(rscratch1, Assembler::NE);
1546   __ membar(__ AnyAny);
1547 }
1548 
1549 
1550 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1551   assert(VM_Version::supports_cx8(), "wrong machine");
1552   Register addr;
1553   if (op->addr()->is_register()) {
1554     addr = as_reg(op->addr());
1555   } else {
1556     assert(op->addr()->is_address(), "what else?");
1557     LIR_Address* addr_ptr = op->addr()->as_address_ptr();
1558     assert(addr_ptr->disp() == 0, "need 0 disp");
1559     assert(addr_ptr->index() == LIR_Opr::illegalOpr(), "need 0 index");
1560     addr = as_reg(addr_ptr->base());
1561   }
1562   Register newval = as_reg(op->new_value());
1563   Register cmpval = as_reg(op->cmp_value());
1564 
1565   if (op->code() == lir_cas_obj) {
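    // With compressed oops, compare the 32-bit encoded forms of the
    // expected and new values, so a word-sized CAS suffices; otherwise
    // CAS the full 64-bit oop.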
1566     if (UseCompressedOops) {
1567       Register t1 = op->tmp1()->as_register();
1568       assert(op->tmp1()->is_valid(), "must be");
1569       __ encode_heap_oop(t1, cmpval);
1570       cmpval = t1;
1571       __ encode_heap_oop(rscratch2, newval);
1572       newval = rscratch2;
1573       casw(addr, newval, cmpval);
1574     } else {
1575       casl(addr, newval, cmpval);
1576     }
1577   } else if (op->code() == lir_cas_int) {
1578     casw(addr, newval, cmpval);
1579   } else {
1580     casl(addr, newval, cmpval);
1581   }
1582 }
1583 
1584 
1585 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
1586 
1587   Assembler::Condition acond, ncond;
1588   switch (condition) {
1589   case lir_cond_equal:        acond = Assembler::EQ; ncond = Assembler::NE; break;
1590   case lir_cond_notEqual:     acond = Assembler::NE; ncond = Assembler::EQ; break;
1591   case lir_cond_less:         acond = Assembler::LT; ncond = Assembler::GE; break;
1592   case lir_cond_lessEqual:    acond = Assembler::LE; ncond = Assembler::GT; break;
1593   case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;
1594   case lir_cond_greater:      acond = Assembler::GT; ncond = Assembler::LE; break;
1595   case lir_cond_belowEqual:
1596   case lir_cond_aboveEqual:
1597   default:                    ShouldNotReachHere();
1598     acond = Assembler::EQ; ncond = Assembler::NE;  // unreachable
1599   }
1600 
1601   assert(result->is_single_cpu() || result->is_double_cpu(),
1602          "expect single register for result");
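  // If the two inputs are the constants 0 and 1, the conditional move
  // reduces to a single cset on the condition (or its negation).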
1603   if (opr1->is_constant() && opr2->is_constant()
1604       && opr1->type() == T_INT && opr2->type() == T_INT) {
1605     jint val1 = opr1->as_jint();
1606     jint val2 = opr2->as_jint();
1607     if (val1 == 0 && val2 == 1) {
1608       __ cset(result->as_register(), ncond);
1609       return;
1610     } else if (val1 == 1 && val2 == 0) {
1611       __ cset(result->as_register(), acond);
1612       return;
1613     }
1614   }
1615 
1616   if (opr1->is_constant() && opr2->is_constant()
1617       && opr1->type() == T_LONG && opr2->type() == T_LONG) {
1618     jlong val1 = opr1->as_jlong();
1619     jlong val2 = opr2->as_jlong();
1620     if (val1 == 0 && val2 == 1) {
1621       __ cset(result->as_register_lo(), ncond);
1622       return;
1623     } else if (val1 == 1 && val2 == 0) {
1624       __ cset(result->as_register_lo(), acond);
1625       return;
1626     }
1627   }
1628 
1629   if (opr1->is_stack()) {
1630     stack2reg(opr1, FrameMap::rscratch1_opr, result->type());
1631     opr1 = FrameMap::rscratch1_opr;
1632   } else if (opr1->is_constant()) {
1633     LIR_Opr tmp
1634       = opr1->type() == T_LONG ? FrameMap::rscratch1_long_opr : FrameMap::rscratch1_opr;
1635     const2reg(opr1, tmp, lir_patch_none, NULL);
1636     opr1 = tmp;
1637   }
1638 
1639   if (opr2->is_stack()) {
1640     stack2reg(opr2, FrameMap::rscratch2_opr, result->type());
1641     opr2 = FrameMap::rscratch2_opr;
1642   } else if (opr2->is_constant()) {
1643     LIR_Opr tmp
1644       = opr2->type() == T_LONG ? FrameMap::rscratch2_long_opr : FrameMap::rscratch2_opr;
1645     const2reg(opr2, tmp, lir_patch_none, NULL);
1646     opr2 = tmp;
1647   }
1648 
1649   if (result->type() == T_LONG)
1650     __ csel(result->as_register_lo(), opr1->as_register_lo(), opr2->as_register_lo(), acond);
1651   else
1652     __ csel(result->as_register(), opr1->as_register(), opr2->as_register(), acond);
1653 }
1654 
1655 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
1656   assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
1657 
1658   if (left->is_single_cpu()) {
1659     Register lreg = left->as_register();
1660     Register dreg = as_reg(dest);
1661 
1662     if (right->is_single_cpu()) {
1663       // cpu register - cpu register
1664 
1665       assert(left->type() == T_INT && right->type() == T_INT && dest->type() == T_INT,
1666              "should be");
1667       Register rreg = right->as_register();
1668       switch (code) {
1669       case lir_add: __ addw (dest->as_register(), lreg, rreg); break;
1670       case lir_sub: __ subw (dest->as_register(), lreg, rreg); break;
1671       case lir_mul: __ mulw (dest->as_register(), lreg, rreg); break;
1672       default:      ShouldNotReachHere();
1673       }
1674 
1675     } else if (right->is_double_cpu()) {
1676       Register rreg = right->as_register_lo();
1677       // single_cpu + double_cpu: can happen with obj+long
1678       assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
1679       switch (code) {
1680       case lir_add: __ add(dreg, lreg, rreg); break;
1681       case lir_sub: __ sub(dreg, lreg, rreg); break;
1682       default: ShouldNotReachHere();
1683       }
1684     } else if (right->is_constant()) {
1685       // cpu register - constant
1686       jlong c;
1687 
      // FIXME: this is ugly; we really need to factor out all of this logic.
1689       switch(right->type()) {
1690       case T_LONG:
1691         c = right->as_constant_ptr()->as_jlong();
1692         break;
1693       case T_INT:
1694       case T_ADDRESS:
1695         c = right->as_constant_ptr()->as_jint();
1696         break;
1697       default:
1698         ShouldNotReachHere();
1699         c = 0;  // unreachable
1700         break;
1701       }
1702 
1703       assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
1704       if (c == 0 && dreg == lreg) {
1705         COMMENT("effective nop elided");
1706         return;
1707       }
1708       switch(left->type()) {
1709       case T_INT:
1710         switch (code) {
1711         case lir_add: __ addw(dreg, lreg, c); break;
1712         case lir_sub: __ subw(dreg, lreg, c); break;
1713         default: ShouldNotReachHere();
1714         }
1715         break;
1716       case T_OBJECT:
1717       case T_ADDRESS:
1718         switch (code) {
1719         case lir_add: __ add(dreg, lreg, c); break;
1720         case lir_sub: __ sub(dreg, lreg, c); break;
1721         default: ShouldNotReachHere();
1722         }
1723         break;
1724       default:
1725         ShouldNotReachHere();
1726       }
1727     } else {
1728       ShouldNotReachHere();
1729     }
1730 
1731   } else if (left->is_double_cpu()) {
1732     Register lreg_lo = left->as_register_lo();
1733 
1734     if (right->is_double_cpu()) {
1735       // cpu register - cpu register
1736       Register rreg_lo = right->as_register_lo();
1737       switch (code) {
1738       case lir_add: __ add (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1739       case lir_sub: __ sub (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1740       case lir_mul: __ mul (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1741       case lir_div: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, false, rscratch1); break;
1742       case lir_rem: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, true, rscratch1); break;
1743       default:
1744         ShouldNotReachHere();
1745       }
1746 
1747     } else if (right->is_constant()) {
1748       jlong c = right->as_constant_ptr()->as_jlong();
1749       Register dreg = as_reg(dest);
1750       switch (code) {
1751         case lir_add:
1752         case lir_sub:
1753           if (c == 0 && dreg == lreg_lo) {
1754             COMMENT("effective nop elided");
1755             return;
1756           }
          if (code == lir_add) {
            __ add(dreg, lreg_lo, c);
          } else {
            __ sub(dreg, lreg_lo, c);
          }
1758           break;
1759         case lir_div:
1760           assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
1761           if (c == 1) {
1762             // move lreg_lo to dreg if divisor is 1
1763             __ mov(dreg, lreg_lo);
1764           } else {
1765             unsigned int shift = log2i_exact(c);
1766             // use rscratch1 as intermediate result register
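            // Signed division by 2^shift with Java round-toward-zero
            // semantics: the asr builds an all-ones mask for a negative
            // dividend, and the shifted add then biases it by (c - 1).
            // E.g. c == 8 (shift == 3), lreg_lo == -9: -9 + 7 == -2 and
            // -2 >> 3 == -1, matching Java's -9 / 8 == -1, where a plain
            // arithmetic shift would give -2.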
1767             __ asr(rscratch1, lreg_lo, 63);
1768             __ add(rscratch1, lreg_lo, rscratch1, Assembler::LSR, 64 - shift);
1769             __ asr(dreg, rscratch1, shift);
1770           }
1771           break;
1772         case lir_rem:
1773           assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
1774           if (c == 1) {
1775             // move 0 to dreg if divisor is 1
1776             __ mov(dreg, zr);
1777           } else {
1778             // use rscratch1 as intermediate result register
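            // The remainder for a 2^k divisor must keep the dividend's
            // sign: compute lreg_lo & (c - 1) and (-lreg_lo) & (c - 1);
            // the csneg keeps the former when negs set MI (positive
            // dividend) and negates the latter otherwise.
            // E.g. -9 % 8 == -(9 & 7) == -1, while 9 % 8 == 9 & 7 == 1.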
1779             __ negs(rscratch1, lreg_lo);
1780             __ andr(dreg, lreg_lo, c - 1);
1781             __ andr(rscratch1, rscratch1, c - 1);
1782             __ csneg(dreg, dreg, rscratch1, Assembler::MI);
1783           }
1784           break;
1785         default:
1786           ShouldNotReachHere();
1787       }
1788     } else {
1789       ShouldNotReachHere();
1790     }
1791   } else if (left->is_single_fpu()) {
    assert(right->is_single_fpu(), "right-hand side of float arithmetic must be a float register");
1793     switch (code) {
1794     case lir_add: __ fadds (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1795     case lir_sub: __ fsubs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1796     case lir_mul: __ fmuls (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1797     case lir_div: __ fdivs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1798     default:
1799       ShouldNotReachHere();
1800     }
1801   } else if (left->is_double_fpu()) {
1802     if (right->is_double_fpu()) {
1803       // fpu register - fpu register
1804       switch (code) {
1805       case lir_add: __ faddd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1806       case lir_sub: __ fsubd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1807       case lir_mul: __ fmuld (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1808       case lir_div: __ fdivd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1809       default:
1810         ShouldNotReachHere();
1811       }
    } else {
      // constant or other right-hand operands are not expected for
      // double-float arithmetic
      ShouldNotReachHere();
    }
1818   } else if (left->is_single_stack() || left->is_address()) {
1819     assert(left == dest, "left and dest must be equal");
1820     ShouldNotReachHere();
1821   } else {
1822     ShouldNotReachHere();
1823   }
1824 }
1825 
1826 void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) { Unimplemented(); }
1827 
1828 
1829 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
1830   switch(code) {
1831   case lir_abs : __ fabsd(dest->as_double_reg(), value->as_double_reg()); break;
1832   case lir_sqrt: __ fsqrtd(dest->as_double_reg(), value->as_double_reg()); break;
1833   default      : ShouldNotReachHere();
1834   }
1835 }
1836 
1837 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1838 
1839   assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");
1840   Register Rleft = left->is_single_cpu() ? left->as_register() :
1841                                            left->as_register_lo();
1842    if (dst->is_single_cpu()) {
1843      Register Rdst = dst->as_register();
1844      if (right->is_constant()) {
1845        switch (code) {
1846          case lir_logic_and: __ andw (Rdst, Rleft, right->as_jint()); break;
1847          case lir_logic_or:  __ orrw (Rdst, Rleft, right->as_jint()); break;
1848          case lir_logic_xor: __ eorw (Rdst, Rleft, right->as_jint()); break;
1849          default: ShouldNotReachHere(); break;
1850        }
1851      } else {
1852        Register Rright = right->is_single_cpu() ? right->as_register() :
1853                                                   right->as_register_lo();
1854        switch (code) {
1855          case lir_logic_and: __ andw (Rdst, Rleft, Rright); break;
1856          case lir_logic_or:  __ orrw (Rdst, Rleft, Rright); break;
1857          case lir_logic_xor: __ eorw (Rdst, Rleft, Rright); break;
1858          default: ShouldNotReachHere(); break;
1859        }
1860      }
1861    } else {
1862      Register Rdst = dst->as_register_lo();
1863      if (right->is_constant()) {
1864        switch (code) {
1865          case lir_logic_and: __ andr (Rdst, Rleft, right->as_jlong()); break;
1866          case lir_logic_or:  __ orr (Rdst, Rleft, right->as_jlong()); break;
1867          case lir_logic_xor: __ eor (Rdst, Rleft, right->as_jlong()); break;
1868          default: ShouldNotReachHere(); break;
1869        }
1870      } else {
1871        Register Rright = right->is_single_cpu() ? right->as_register() :
1872                                                   right->as_register_lo();
1873        switch (code) {
1874          case lir_logic_and: __ andr (Rdst, Rleft, Rright); break;
1875          case lir_logic_or:  __ orr (Rdst, Rleft, Rright); break;
1876          case lir_logic_xor: __ eor (Rdst, Rleft, Rright); break;
1877          default: ShouldNotReachHere(); break;
1878        }
1879      }
1880    }
1881 }
1882 
1883 
1884 
1885 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr illegal, LIR_Opr result, CodeEmitInfo* info) {
1886 
1887   // opcode check
1888   assert((code == lir_idiv) || (code == lir_irem), "opcode must be idiv or irem");
1889   bool is_irem = (code == lir_irem);
1890 
1891   // operand check
1892   assert(left->is_single_cpu(),   "left must be register");
1893   assert(right->is_single_cpu() || right->is_constant(),  "right must be register or constant");
1894   assert(result->is_single_cpu(), "result must be register");
1895   Register lreg = left->as_register();
1896   Register dreg = result->as_register();
1897 
1898   // power-of-2 constant check and codegen
1899   if (right->is_constant()) {
1900     int c = right->as_constant_ptr()->as_jint();
1901     assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
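    // Same power-of-two strength reduction as the 64-bit constant case
    // in arith_op above, using the 32-bit instruction forms; see the
    // worked examples there.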
1902     if (is_irem) {
1903       if (c == 1) {
1904         // move 0 to dreg if divisor is 1
1905         __ movw(dreg, zr);
1906       } else {
1907         // use rscratch1 as intermediate result register
1908         __ negsw(rscratch1, lreg);
1909         __ andw(dreg, lreg, c - 1);
1910         __ andw(rscratch1, rscratch1, c - 1);
1911         __ csnegw(dreg, dreg, rscratch1, Assembler::MI);
1912       }
1913     } else {
1914       if (c == 1) {
1915         // move lreg to dreg if divisor is 1
1916         __ movw(dreg, lreg);
1917       } else {
1918         unsigned int shift = exact_log2(c);
1919         // use rscratch1 as intermediate result register
1920         __ asrw(rscratch1, lreg, 31);
1921         __ addw(rscratch1, lreg, rscratch1, Assembler::LSR, 32 - shift);
1922         __ asrw(dreg, rscratch1, shift);
1923       }
1924     }
1925   } else {
1926     Register rreg = right->as_register();
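    // corrected_idivl emits an sdivw (plus an msubw to form the
    // remainder when is_irem), using rscratch1 as scratch.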
1927     __ corrected_idivl(dreg, lreg, rreg, is_irem, rscratch1);
1928   }
1929 }
1930 
1931 
1932 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1933   if (opr1->is_constant() && opr2->is_single_cpu()) {
1934     // tableswitch
1935     Register reg = as_reg(opr2);
1936     struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
1937     __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
1938   } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
1939     Register reg1 = as_reg(opr1);
1940     if (opr2->is_single_cpu()) {
1941       // cpu register - cpu register
1942       Register reg2 = opr2->as_register();
1943       if (is_reference_type(opr1->type())) {
1944         __ cmpoop(reg1, reg2);
1945       } else {
1946         assert(!is_reference_type(opr2->type()), "cmp int, oop?");
1947         __ cmpw(reg1, reg2);
1948       }
1949       return;
1950     }
1951     if (opr2->is_double_cpu()) {
1952       // cpu register - cpu register
1953       Register reg2 = opr2->as_register_lo();
1954       __ cmp(reg1, reg2);
1955       return;
1956     }
1957 
1958     if (opr2->is_constant()) {
1959       bool is_32bit = false; // width of register operand
1960       jlong imm;
1961 
1962       switch(opr2->type()) {
1963       case T_INT:
1964         imm = opr2->as_constant_ptr()->as_jint();
1965         is_32bit = true;
1966         break;
1967       case T_LONG:
1968         imm = opr2->as_constant_ptr()->as_jlong();
1969         break;
1970       case T_ADDRESS:
1971         imm = opr2->as_constant_ptr()->as_jint();
1972         break;
1973       case T_METADATA:
1974         imm = (intptr_t)(opr2->as_constant_ptr()->as_metadata());
1975         break;
1976       case T_OBJECT:
1977       case T_ARRAY:
1978         jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
1979         __ cmpoop(reg1, rscratch1);
1980         return;
1981       default:
1982         ShouldNotReachHere();
1983         imm = 0;  // unreachable
1984         break;
1985       }
1986 
1987       if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
1988         if (is_32bit)
1989           __ cmpw(reg1, imm);
1990         else
1991           __ subs(zr, reg1, imm);
1992         return;
1993       } else {
1994         __ mov(rscratch1, imm);
1995         if (is_32bit)
1996           __ cmpw(reg1, rscratch1);
1997         else
1998           __ cmp(reg1, rscratch1);
1999         return;
2000       }
2001     } else
2002       ShouldNotReachHere();
2003   } else if (opr1->is_single_fpu()) {
2004     FloatRegister reg1 = opr1->as_float_reg();
2005     assert(opr2->is_single_fpu(), "expect single float register");
2006     FloatRegister reg2 = opr2->as_float_reg();
2007     __ fcmps(reg1, reg2);
2008   } else if (opr1->is_double_fpu()) {
2009     FloatRegister reg1 = opr1->as_double_reg();
2010     assert(opr2->is_double_fpu(), "expect double float register");
2011     FloatRegister reg2 = opr2->as_double_reg();
2012     __ fcmpd(reg1, reg2);
2013   } else {
2014     ShouldNotReachHere();
2015   }
2016 }
2017 
2018 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
2019   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2020     bool is_unordered_less = (code == lir_ucmp_fd2i);
2021     if (left->is_single_fpu()) {
2022       __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
2023     } else if (left->is_double_fpu()) {
2024       __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
2025     } else {
2026       ShouldNotReachHere();
2027     }
2028   } else if (code == lir_cmp_l2i) {
2029     Label done;
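    // Produce the canonical -1/0/1 for a long compare: start with -1
    // and keep it on LT; otherwise csinc yields 0 on EQ and 1 on GT.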
2030     __ cmp(left->as_register_lo(), right->as_register_lo());
2031     __ mov(dst->as_register(), (uint64_t)-1L);
2032     __ br(Assembler::LT, done);
2033     __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
2034     __ bind(done);
2035   } else {
2036     ShouldNotReachHere();
2037   }
2038 }
2039 
2040 
2041 void LIR_Assembler::align_call(LIR_Code code) {  }
2042 
2043 
2044 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2045   address call = __ trampoline_call(Address(op->addr(), rtype));
2046   if (call == NULL) {
2047     bailout("trampoline stub overflow");
2048     return;
2049   }
2050   add_call_info(code_offset(), op->info());
2051 }
2052 
2053 
2054 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2055   address call = __ ic_call(op->addr());
2056   if (call == NULL) {
2057     bailout("trampoline stub overflow");
2058     return;
2059   }
2060   add_call_info(code_offset(), op->info());
2061 }
2062 
2063 void LIR_Assembler::emit_static_call_stub() {
2064   address call_pc = __ pc();
2065   address stub = __ start_a_stub(call_stub_size());
2066   if (stub == NULL) {
2067     bailout("static call stub overflow");
2068     return;
2069   }
2070 
2071   int start = __ offset();
2072 
2073   __ relocate(static_stub_Relocation::spec(call_pc));
2074   __ emit_static_call_stub();
2075 
2076   assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size()
2077         <= call_stub_size(), "stub too big");
2078   __ end_a_stub();
2079 }
2080 
2081 
2082 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2083   assert(exceptionOop->as_register() == r0, "must match");
2084   assert(exceptionPC->as_register() == r3, "must match");
2085 
2086   // exception object is not added to oop map by LinearScan
2087   // (LinearScan assumes that no oops are in fixed registers)
2088   info->add_register_oop(exceptionOop);
2089   Runtime1::StubID unwind_id;
2090 
2091   // get current pc information
  // pc is only needed if the method has an exception handler; the unwind code does not need it.
2093   if (compilation()->debug_info_recorder()->last_pc_offset() == __ offset()) {
2094     // As no instructions have been generated yet for this LIR node it's
2095     // possible that an oop map already exists for the current offset.
    // In that case insert a dummy NOP here to ensure all oop map PCs
2097     // are unique. See JDK-8237483.
2098     __ nop();
2099   }
2100   int pc_for_athrow_offset = __ offset();
2101   InternalAddress pc_for_athrow(__ pc());
2102   __ adr(exceptionPC->as_register(), pc_for_athrow);
2103   add_call_info(pc_for_athrow_offset, info); // for exception handler
2104 
2105   __ verify_not_null_oop(r0);
2106   // search an exception handler (r0: exception oop, r3: throwing pc)
2107   if (compilation()->has_fpu_code()) {
2108     unwind_id = Runtime1::handle_exception_id;
2109   } else {
2110     unwind_id = Runtime1::handle_exception_nofpu_id;
2111   }
2112   __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
2113 
  // FIXME: is there enough room for a two-byte trap?
2115   __ nop();
2116 }
2117 
2118 
2119 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
2120   assert(exceptionOop->as_register() == r0, "must match");
2121 
2122   __ b(_unwind_handler_entry);
2123 }
2124 
2125 
2126 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2127   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2128   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2129 
  switch (left->type()) {
    case T_INT:
      switch (code) {
      case lir_shl:  __ lslvw (dreg, lreg, count->as_register()); break;
      case lir_shr:  __ asrvw (dreg, lreg, count->as_register()); break;
      case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
      default:
        ShouldNotReachHere();
        break;
      }
      break;
    case T_LONG:
    case T_ADDRESS:
    case T_OBJECT:
      switch (code) {
      case lir_shl:  __ lslv (dreg, lreg, count->as_register()); break;
      case lir_shr:  __ asrv (dreg, lreg, count->as_register()); break;
      case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
      default:
        ShouldNotReachHere();
        break;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
  }
2158 }
2159 
2160 
2161 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2162   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2163   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2164 
  switch (left->type()) {
    case T_INT:
      switch (code) {
      case lir_shl:  __ lslw (dreg, lreg, count); break;
      case lir_shr:  __ asrw (dreg, lreg, count); break;
      case lir_ushr: __ lsrw (dreg, lreg, count); break;
      default:
        ShouldNotReachHere();
        break;
      }
      break;
    case T_LONG:
    case T_ADDRESS:
    case T_OBJECT:
      switch (code) {
      case lir_shl:  __ lsl (dreg, lreg, count); break;
      case lir_shr:  __ asr (dreg, lreg, count); break;
      case lir_ushr: __ lsr (dreg, lreg, count); break;
      default:
        ShouldNotReachHere();
        break;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
  }
2193 }
2194 
2195 
2196 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2197   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2198   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2199   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2200   __ str (r, Address(sp, offset_from_rsp_in_bytes));
2201 }
2202 
2203 
2204 void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2205   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2206   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2207   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2208   __ mov (rscratch1, c);
2209   __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2210 }
2211 
2212 
2213 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
2214   ShouldNotReachHere();
2215   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2216   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2217   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2218   __ lea(rscratch1, __ constant_oop_address(o));
2219   __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2220 }
2221 
2222 
// This code replaces a call to arraycopy; no exception may be thrown
// in this code, because exceptions must be thrown in the
// System.arraycopy activation frame instead. We could save some checks
// if that were not the case.
2226 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2227   ciArrayKlass* default_type = op->expected_type();
2228   Register src = op->src()->as_register();
2229   Register dst = op->dst()->as_register();
2230   Register src_pos = op->src_pos()->as_register();
2231   Register dst_pos = op->dst_pos()->as_register();
2232   Register length  = op->length()->as_register();
2233   Register tmp = op->tmp()->as_register();
2234 
2235   CodeStub* stub = op->stub();
2236   int flags = op->flags();
2237   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2238   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2239 
2240   // if we don't know anything, just go through the generic arraycopy
  if (default_type == NULL /* || basic_type == T_OBJECT */) {
2243     Label done;
2244     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2245 
2246     // Save the arguments in case the generic arraycopy fails and we
2247     // have to fall back to the JNI stub
2248     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2249     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2250     __ str(src,              Address(sp, 4*BytesPerWord));
2251 
2252     address copyfunc_addr = StubRoutines::generic_arraycopy();
2253     assert(copyfunc_addr != NULL, "generic arraycopy stub required");
2254 
2255     // The arguments are in java calling convention so we shift them
2256     // to C convention
2257     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2258     __ mov(c_rarg0, j_rarg0);
2259     assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2260     __ mov(c_rarg1, j_rarg1);
2261     assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
2262     __ mov(c_rarg2, j_rarg2);
2263     assert_different_registers(c_rarg3, j_rarg4);
2264     __ mov(c_rarg3, j_rarg3);
2265     __ mov(c_rarg4, j_rarg4);
2266 #ifndef PRODUCT
2267     if (PrintC1Statistics) {
2268       __ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
2269     }
2270 #endif
2271     __ far_call(RuntimeAddress(copyfunc_addr));
2272 
2273     __ cbz(r0, *stub->continuation());
2274 
2275     // Reload values from the stack so they are where the stub
2276     // expects them.
2277     __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2278     __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2279     __ ldr(src,              Address(sp, 4*BytesPerWord));
2280 
    // r0 is -1 ^ K (i.e. ~K) where K == the partial copied count
    __ eonw(rscratch1, r0, zr);
    // adjust length down and src/dst pos up by the partial copied count
2284     __ subw(length, length, rscratch1);
2285     __ addw(src_pos, src_pos, rscratch1);
2286     __ addw(dst_pos, dst_pos, rscratch1);
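    // E.g. if the stub managed to copy 3 of 10 elements, r0 == ~3 == -4,
    // so rscratch1 == 3: length becomes 7 and both positions advance by
    // 3 before the slow-path stub retries the rest.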
2287     __ b(*stub->entry());
2288 
2289     __ bind(*stub->continuation());
2290     return;
2291   }
2292 
2293   assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2294 
2295   int elem_size = type2aelembytes(basic_type);
2296   int scale = exact_log2(elem_size);
2297 
2298   Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2299   Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2300   Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
2301   Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
2302 
2303   // test for NULL
2304   if (flags & LIR_OpArrayCopy::src_null_check) {
2305     __ cbz(src, *stub->entry());
2306   }
2307   if (flags & LIR_OpArrayCopy::dst_null_check) {
2308     __ cbz(dst, *stub->entry());
2309   }
2310 
2311   // If the compiler was not able to prove that exact type of the source or the destination
2312   // of the arraycopy is an array type, check at runtime if the source or the destination is
2313   // an instance type.
2314   if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2316       __ load_klass(tmp, dst);
2317       __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2318       __ cmpw(rscratch1, Klass::_lh_neutral_value);
2319       __ br(Assembler::GE, *stub->entry());
2320     }
2321 
    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2323       __ load_klass(tmp, src);
2324       __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2325       __ cmpw(rscratch1, Klass::_lh_neutral_value);
2326       __ br(Assembler::GE, *stub->entry());
2327     }
2328   }
2329 
2330   // check if negative
2331   if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
2332     __ cmpw(src_pos, 0);
2333     __ br(Assembler::LT, *stub->entry());
2334   }
2335   if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
2336     __ cmpw(dst_pos, 0);
2337     __ br(Assembler::LT, *stub->entry());
2338   }
2339 
2340   if (flags & LIR_OpArrayCopy::length_positive_check) {
2341     __ cmpw(length, 0);
2342     __ br(Assembler::LT, *stub->entry());
2343   }
2344 
2345   if (flags & LIR_OpArrayCopy::src_range_check) {
2346     __ addw(tmp, src_pos, length);
2347     __ ldrw(rscratch1, src_length_addr);
2348     __ cmpw(tmp, rscratch1);
2349     __ br(Assembler::HI, *stub->entry());
2350   }
2351   if (flags & LIR_OpArrayCopy::dst_range_check) {
2352     __ addw(tmp, dst_pos, length);
2353     __ ldrw(rscratch1, dst_length_addr);
2354     __ cmpw(tmp, rscratch1);
2355     __ br(Assembler::HI, *stub->entry());
2356   }
2357 
2358   if (flags & LIR_OpArrayCopy::type_check) {
2359     // We don't know the array types are compatible
2360     if (basic_type != T_OBJECT) {
2361       // Simple test for basic type arrays
2362       if (UseCompressedClassPointers) {
2363         __ ldrw(tmp, src_klass_addr);
2364         __ ldrw(rscratch1, dst_klass_addr);
2365         __ cmpw(tmp, rscratch1);
2366       } else {
2367         __ ldr(tmp, src_klass_addr);
2368         __ ldr(rscratch1, dst_klass_addr);
2369         __ cmp(tmp, rscratch1);
2370       }
2371       __ br(Assembler::NE, *stub->entry());
2372     } else {
2373       // For object arrays, if src is a sub class of dst then we can
2374       // safely do the copy.
2375       Label cont, slow;
2376 
2377 #define PUSH(r1, r2)                                    \
2378       stp(r1, r2, __ pre(sp, -2 * wordSize));
2379 
2380 #define POP(r1, r2)                                     \
2381       ldp(r1, r2, __ post(sp, 2 * wordSize));
2382 
2383       __ PUSH(src, dst);
2384 
2385       __ load_klass(src, src);
2386       __ load_klass(dst, dst);
2387 
2388       __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
2389 
2390       __ PUSH(src, dst);
2391       __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
2392       __ POP(src, dst);
2393 
2394       __ cbnz(src, cont);
2395 
2396       __ bind(slow);
2397       __ POP(src, dst);
2398 
2399       address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2400       if (copyfunc_addr != NULL) { // use stub if available
2401         // src is not a sub class of dst so we have to do a
2402         // per-element check.
2403 
2404         int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2405         if ((flags & mask) != mask) {
          // One of src/dst is known to be an object array; check that the other one is too.
2407           assert(flags & mask, "one of the two should be known to be an object array");
2408 
2409           if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2410             __ load_klass(tmp, src);
2411           } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2412             __ load_klass(tmp, dst);
2413           }
2414           int lh_offset = in_bytes(Klass::layout_helper_offset());
2415           Address klass_lh_addr(tmp, lh_offset);
2416           jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2417           __ ldrw(rscratch1, klass_lh_addr);
2418           __ mov(rscratch2, objArray_lh);
2419           __ eorw(rscratch1, rscratch1, rscratch2);
2420           __ cbnzw(rscratch1, *stub->entry());
2421         }
2422 
2423        // Spill because stubs can use any register they like and it's
2424        // easier to restore just those that we care about.
2425         __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2426         __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2427         __ str(src,              Address(sp, 4*BytesPerWord));
2428 
2429         __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2430         __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2431         assert_different_registers(c_rarg0, dst, dst_pos, length);
2432         __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2433         __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
2434         assert_different_registers(c_rarg1, dst, length);
2435         __ uxtw(c_rarg2, length);
2436         assert_different_registers(c_rarg2, dst);
2437 
2438         __ load_klass(c_rarg4, dst);
2439         __ ldr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
2440         __ ldrw(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
2441         __ far_call(RuntimeAddress(copyfunc_addr));
2442 
2443 #ifndef PRODUCT
2444         if (PrintC1Statistics) {
2445           Label failed;
2446           __ cbnz(r0, failed);
2447           __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
2448           __ bind(failed);
2449         }
2450 #endif
2451 
2452         __ cbz(r0, *stub->continuation());
2453 
2454 #ifndef PRODUCT
2455         if (PrintC1Statistics) {
2456           __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
2457         }
2458 #endif
2459         assert_different_registers(dst, dst_pos, length, src_pos, src, r0, rscratch1);
2460 
2461         // Restore previously spilled arguments
2462         __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2463         __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2464         __ ldr(src,              Address(sp, 4*BytesPerWord));
2465 
        // the return value is -1 ^ K (i.e. ~K) where K == the partial copied count
        __ eonw(rscratch1, r0, zr);
        // adjust length down and src/dst pos up by the partial copied count
2469         __ subw(length, length, rscratch1);
2470         __ addw(src_pos, src_pos, rscratch1);
2471         __ addw(dst_pos, dst_pos, rscratch1);
2472       }
2473 
2474       __ b(*stub->entry());
2475 
2476       __ bind(cont);
2477       __ POP(src, dst);
2478     }
2479   }
2480 
2481 #ifdef ASSERT
2482   if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2483     // Sanity check the known type with the incoming class.  For the
2484     // primitive case the types must match exactly with src.klass and
2485     // dst.klass each exactly matching the default type.  For the
2486     // object array case, if no type check is needed then either the
2487     // dst type is exactly the expected type and the src type is a
2488     // subtype which we can't check or src is the same array as dst
2489     // but not necessarily exactly of type default_type.
2490     Label known_ok, halt;
2491     __ mov_metadata(tmp, default_type->constant_encoding());
2492     if (UseCompressedClassPointers) {
2493       __ encode_klass_not_null(tmp);
2494     }
2495 
2496     if (basic_type != T_OBJECT) {
2497 
2498       if (UseCompressedClassPointers) {
2499         __ ldrw(rscratch1, dst_klass_addr);
2500         __ cmpw(tmp, rscratch1);
2501       } else {
2502         __ ldr(rscratch1, dst_klass_addr);
2503         __ cmp(tmp, rscratch1);
2504       }
2505       __ br(Assembler::NE, halt);
2506       if (UseCompressedClassPointers) {
2507         __ ldrw(rscratch1, src_klass_addr);
2508         __ cmpw(tmp, rscratch1);
2509       } else {
2510         __ ldr(rscratch1, src_klass_addr);
2511         __ cmp(tmp, rscratch1);
2512       }
2513       __ br(Assembler::EQ, known_ok);
2514     } else {
2515       if (UseCompressedClassPointers) {
2516         __ ldrw(rscratch1, dst_klass_addr);
2517         __ cmpw(tmp, rscratch1);
2518       } else {
2519         __ ldr(rscratch1, dst_klass_addr);
2520         __ cmp(tmp, rscratch1);
2521       }
2522       __ br(Assembler::EQ, known_ok);
2523       __ cmp(src, dst);
2524       __ br(Assembler::EQ, known_ok);
2525     }
2526     __ bind(halt);
2527     __ stop("incorrect type information in arraycopy");
2528     __ bind(known_ok);
2529   }
2530 #endif
2531 
2532 #ifndef PRODUCT
2533   if (PrintC1Statistics) {
2534     __ incrementw(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
2535   }
2536 #endif
2537 
2538   __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2539   __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2540   assert_different_registers(c_rarg0, dst, dst_pos, length);
2541   __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2542   __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
2543   assert_different_registers(c_rarg1, dst, length);
2544   __ uxtw(c_rarg2, length);
2545   assert_different_registers(c_rarg2, dst);
2546 
2547   bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2548   bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
2549   const char *name;
2550   address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2551 
  CodeBlob *cb = CodeCache::find_blob(entry);
  if (cb) {
    __ far_call(RuntimeAddress(entry));
  } else {
    __ call_VM_leaf(entry, 3);
  }
2558 
2559   __ bind(*stub->continuation());
2560 }
2561 
2562 
2563 
2564 
2565 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2566   Register obj = op->obj_opr()->as_register();  // may not be an oop
2567   Register hdr = op->hdr_opr()->as_register();
2568   Register lock = op->lock_opr()->as_register();
2569   if (UseHeavyMonitors) {
2570     __ b(*op->stub()->entry());
2571   } else if (op->code() == lir_lock) {
2572     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2573     // add debug info for NullPointerException only if one is possible
2574     int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
2575     if (op->info() != NULL) {
2576       add_debug_info_for_null_check(null_check_offset, op->info());
2577     }
2578     // done
2579   } else if (op->code() == lir_unlock) {
2580     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2581     __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2582   } else {
2583     Unimplemented();
2584   }
2585   __ bind(*op->stub()->continuation());
2586 }
2587 
2588 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
2589   Register obj = op->obj()->as_pointer_register();
2590   Register result = op->result_opr()->as_pointer_register();
2591 
2592   CodeEmitInfo* info = op->info();
2593   if (info != NULL) {
2594     add_debug_info_for_null_check_here(info);
2595   }
2596 
2597   if (UseCompressedClassPointers) {
2598     __ ldrw(result, Address (obj, oopDesc::klass_offset_in_bytes()));
2599     __ decode_klass_not_null(result);
2600   } else {
2601     __ ldr(result, Address (obj, oopDesc::klass_offset_in_bytes()));
2602   }
2603 }
2604 
2605 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2606   ciMethod* method = op->profiled_method();
2607   int bci          = op->profiled_bci();
2608   ciMethod* callee = op->profiled_callee();
2609 
2610   // Update counter for all call types
2611   ciMethodData* md = method->method_data_or_null();
2612   assert(md != NULL, "Sanity");
2613   ciProfileData* data = md->bci_to_data(bci);
2614   assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
2615   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
2616   Register mdo  = op->mdo()->as_register();
2617   __ mov_metadata(mdo, md->constant_encoding());
2618   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
2619   // Perform additional virtual call profiling for invokevirtual and
2620   // invokeinterface bytecodes
2621   if (op->should_profile_receiver_type()) {
2622     assert(op->recv()->is_single_cpu(), "recv must be allocated");
2623     Register recv = op->recv()->as_register();
2624     assert_different_registers(mdo, recv);
2625     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
2626     ciKlass* known_klass = op->known_holder();
2627     if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
2628       // We know the type that will be seen at this call site; we can
2629       // statically update the MethodData* rather than needing to do
2630       // dynamic tests on the receiver type
2631 
2632       // NOTE: we should probably put a lock around this search to
2633       // avoid collisions by concurrent compilations
2634       ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
2635       uint i;
2636       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2637         ciKlass* receiver = vc_data->receiver(i);
2638         if (known_klass->equals(receiver)) {
2639           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
2640           __ addptr(data_addr, DataLayout::counter_increment);
2641           return;
2642         }
2643       }
2644 
2645       // Receiver type not found in profile data; select an empty slot
2646 
      // Note that this is less efficient than it could be: it always
      // writes the receiver slot of the VirtualCallData rather than
      // doing so only the first time
2650       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2651         ciKlass* receiver = vc_data->receiver(i);
2652         if (receiver == NULL) {
2653           Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
2654           __ mov_metadata(rscratch1, known_klass->constant_encoding());
2655           __ lea(rscratch2, recv_addr);
2656           __ str(rscratch1, Address(rscratch2));
2657           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
2658           __ addptr(data_addr, DataLayout::counter_increment);
2659           return;
2660         }
2661       }
2662     } else {
2663       __ load_klass(recv, recv);
2664       Label update_done;
2665       type_profile_helper(mdo, md, data, recv, &update_done);
2666       // Receiver did not match any saved receiver and there is no empty row for it.
2667       // Increment total counter to indicate polymorphic case.
2668       __ addptr(counter_addr, DataLayout::counter_increment);
2669 
2670       __ bind(update_done);
2671     }
2672   } else {
2673     // Static call
2674     __ addptr(counter_addr, DataLayout::counter_increment);
2675   }
2676 }
2677 
2678 
2679 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
2680   Unimplemented();
2681 }
2682 
2683 
2684 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
2685   __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
2686 }
2687 
2688 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
2689   assert(op->crc()->is_single_cpu(),  "crc must be register");
2690   assert(op->val()->is_single_cpu(),  "byte value must be register");
2691   assert(op->result_opr()->is_single_cpu(), "result must be register");
2692   Register crc = op->crc()->as_register();
2693   Register val = op->val()->as_register();
2694   Register res = op->result_opr()->as_register();
2695 
2696   assert_different_registers(val, crc, res);
2697   uint64_t offset;
2698   __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);
2699   if (offset) __ add(res, res, offset);
2700 
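  // CRC-32 runs on the one's complement of the accumulated value, so
  // invert the crc on entry and invert the result again on exit.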
2701   __ mvnw(crc, crc); // ~crc
2702   __ update_byte_crc32(crc, val, res);
2703   __ mvnw(res, crc); // ~crc
2704 }
2705 
2706 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
2707   COMMENT("emit_profile_type {");
2708   Register obj = op->obj()->as_register();
2709   Register tmp = op->tmp()->as_pointer_register();
2710   Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
2711   ciKlass* exact_klass = op->exact_klass();
2712   intptr_t current_klass = op->current_klass();
2713   bool not_null = op->not_null();
2714   bool no_conflict = op->no_conflict();
2715 
2716   Label update, next, none;
2717 
2718   bool do_null = !not_null;
2719   bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
2720   bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
2721 
2722   assert(do_null || do_update, "why are we here?");
2723   assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
2724   assert(mdo_addr.base() != rscratch1, "wrong register");
2725 
2726   __ verify_oop(obj);
2727 
2728   if (tmp != obj) {
2729     __ mov(tmp, obj);
2730   }
2731   if (do_null) {
2732     __ cbnz(tmp, update);
2733     if (!TypeEntries::was_null_seen(current_klass)) {
2734       __ ldr(rscratch2, mdo_addr);
2735       __ orr(rscratch2, rscratch2, TypeEntries::null_seen);
2736       __ str(rscratch2, mdo_addr);
2737     }
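    // The preprocessor conditionals below give debug builds an else arm
    // for the do_null test above, which verifies that an obj promised to
    // be not-null really is non-null (stop() otherwise).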
2738     if (do_update) {
2739 #ifndef ASSERT
2740       __ b(next);
2741     }
2742 #else
2743       __ b(next);
2744     }
2745   } else {
2746     __ cbnz(tmp, update);
2747     __ stop("unexpected null obj");
2748 #endif
2749   }
2750 
2751   __ bind(update);
2752 
2753   if (do_update) {
2754 #ifdef ASSERT
2755     if (exact_klass != NULL) {
2756       Label ok;
2757       __ load_klass(tmp, tmp);
2758       __ mov_metadata(rscratch1, exact_klass->constant_encoding());
2759       __ eor(rscratch1, tmp, rscratch1);
2760       __ cbz(rscratch1, ok);
2761       __ stop("exact klass and actual klass differ");
2762       __ bind(ok);
2763     }
2764 #endif
2765     if (!no_conflict) {
2766       if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
2767         if (exact_klass != NULL) {
2768           __ mov_metadata(tmp, exact_klass->constant_encoding());
2769         } else {
2770           __ load_klass(tmp, tmp);
2771         }
2772 
2773         __ ldr(rscratch2, mdo_addr);
2774         __ eor(tmp, tmp, rscratch2);
2775         __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2776         // klass seen before, nothing to do. The unknown bit may have been
2777         // set already but no need to check.
2778         __ cbz(rscratch1, next);
2779 
2780         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2781 
2782         if (TypeEntries::is_type_none(current_klass)) {
2783           __ cbz(rscratch2, none);
2784           __ cmp(rscratch2, (u1)TypeEntries::null_seen);
2785           __ br(Assembler::EQ, none);
          // The checks above (which re-read the profiling data from
          // memory) may fail spuriously if another thread has just set
          // the profiled type to this obj's klass: re-read with a load
          // barrier and check again
2789           __ dmb(Assembler::ISHLD);
2790           __ ldr(rscratch2, mdo_addr);
2791           __ eor(tmp, tmp, rscratch2);
2792           __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2793           __ cbz(rscratch1, next);
2794         }
2795       } else {
2796         assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
2797                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
2798 
2799         __ ldr(tmp, mdo_addr);
2800         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2801       }
2802 
      // different from before: we cannot keep an accurate profile anymore.
2804       __ ldr(rscratch2, mdo_addr);
2805       __ orr(rscratch2, rscratch2, TypeEntries::type_unknown);
2806       __ str(rscratch2, mdo_addr);
2807 
2808       if (TypeEntries::is_type_none(current_klass)) {
2809         __ b(next);
2810 
2811         __ bind(none);
2812         // first time here. Set profile type.
2813         __ str(tmp, mdo_addr);
2814       }
2815     } else {
2816       // There's a single possible klass at this profile point
2817       assert(exact_klass != NULL, "should be");
2818       if (TypeEntries::is_type_none(current_klass)) {
2819         __ mov_metadata(tmp, exact_klass->constant_encoding());
2820         __ ldr(rscratch2, mdo_addr);
2821         __ eor(tmp, tmp, rscratch2);
2822         __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2823         __ cbz(rscratch1, next);
2824 #ifdef ASSERT
2825         {
2826           Label ok;
2827           __ ldr(rscratch1, mdo_addr);
2828           __ cbz(rscratch1, ok);
2829           __ cmp(rscratch1, (u1)TypeEntries::null_seen);
2830           __ br(Assembler::EQ, ok);
2831           // may have been set by another thread
2832           __ dmb(Assembler::ISHLD);
2833           __ mov_metadata(rscratch1, exact_klass->constant_encoding());
2834           __ ldr(rscratch2, mdo_addr);
2835           __ eor(rscratch2, rscratch1, rscratch2);
2836           __ andr(rscratch2, rscratch2, TypeEntries::type_mask);
2837           __ cbz(rscratch2, ok);
2838 
2839           __ stop("unexpected profiling mismatch");
2840           __ bind(ok);
2841         }
2842 #endif
2843         // first time here. Set profile type.
2844         __ str(tmp, mdo_addr);
2845       } else {
2846         assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
2847                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2848 
2849         __ ldr(tmp, mdo_addr);
2850         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2851 
2852         __ orr(tmp, tmp, TypeEntries::type_unknown);
2853         __ str(tmp, mdo_addr);
2854         // FIXME: Write barrier needed here?
2855       }
2856     }
2857 
2858     __ bind(next);
2859   }
2860   COMMENT("} emit_profile_type");
2861 }
2862 
2863 
2864 void LIR_Assembler::align_backward_branch_target() {
2865 }
2866 
2867 
2868 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2869   // tmp must be unused
2870   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2871 
2872   if (left->is_single_cpu()) {
2873     assert(dest->is_single_cpu(), "expect single result reg");
2874     __ negw(dest->as_register(), left->as_register());
2875   } else if (left->is_double_cpu()) {
2876     assert(dest->is_double_cpu(), "expect double result reg");
2877     __ neg(dest->as_register_lo(), left->as_register_lo());
2878   } else if (left->is_single_fpu()) {
2879     assert(dest->is_single_fpu(), "expect single float result reg");
2880     __ fnegs(dest->as_float_reg(), left->as_float_reg());
2881   } else {
2882     assert(left->is_double_fpu(), "expect double float operand reg");
2883     assert(dest->is_double_fpu(), "expect double float result reg");
2884     __ fnegd(dest->as_double_reg(), left->as_double_reg());
2885   }
2886 }
2887 
2888 
2889 void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
2890   if (patch_code != lir_patch_none) {
2891     deoptimize_trap(info);
2892     return;
2893   }
2894 
2895   __ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr()));
2896 }
2897 
2898 
2899 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
2900   assert(!tmp->is_valid(), "don't need temporary");
2901 
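  // A destination inside the code cache is reachable with a pc-relative
  // far_call; an arbitrary runtime address may not be, so it goes
  // through a register-indirect blr.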
2902   CodeBlob *cb = CodeCache::find_blob(dest);
2903   if (cb) {
2904     __ far_call(RuntimeAddress(dest));
2905   } else {
2906     __ mov(rscratch1, RuntimeAddress(dest));
2907     __ blr(rscratch1);
2908   }
2909 
2910   if (info != NULL) {
2911     add_call_info_here(info);
2912   }
2913 }
2914 
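     // Volatile moves degenerate to ordinary moves on AArch64; the required
     // ordering is expected to come from the membar LIR ops emitted around
     // volatile accesses (see the membar_* implementations below).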
2915 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
2916   if (dest->is_address() || src->is_address()) {
2917     move_op(src, dest, type, lir_patch_none, info,
2918             /*pop_fpu_stack*/false, /*wide*/false);
2919   } else {
2920     ShouldNotReachHere();
2921   }
2922 }
2923 
2924 #ifdef ASSERT
2925 // emit run-time assertion
2926 void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
2927   assert(op->code() == lir_assert, "must be");
2928 
2929   if (op->in_opr1()->is_valid()) {
2930     assert(op->in_opr2()->is_valid(), "both operands must be valid");
2931     comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
2932   } else {
2933     assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
2934     assert(op->condition() == lir_cond_always, "no other conditions allowed");
2935   }
2936 
2937   Label ok;
2938   if (op->condition() != lir_cond_always) {
2939     Assembler::Condition acond = Assembler::AL;
2940     switch (op->condition()) {
2941       case lir_cond_equal:        acond = Assembler::EQ;  break;
2942       case lir_cond_notEqual:     acond = Assembler::NE;  break;
2943       case lir_cond_less:         acond = Assembler::LT;  break;
2944       case lir_cond_lessEqual:    acond = Assembler::LE;  break;
2945       case lir_cond_greaterEqual: acond = Assembler::GE;  break;
2946       case lir_cond_greater:      acond = Assembler::GT;  break;
2947       case lir_cond_belowEqual:   acond = Assembler::LS;  break;
2948       case lir_cond_aboveEqual:   acond = Assembler::HS;  break;
2949       default:                    ShouldNotReachHere();
2950     }
2951     __ br(acond, ok);
2952   }
2953   if (op->halt()) {
2954     const char* str = __ code_string(op->msg());
2955     __ stop(str);
2956   } else {
2957     breakpoint();
2958   }
2959   __ bind(ok);
2960 }
2961 #endif
2962 
2968 
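     // The LIR memory-barrier ops map onto MacroAssembler::membar kinds: a full
     // barrier (AnyAny) for lir_membar, LoadLoad|LoadStore for acquire,
     // LoadStore|StoreStore for release, and the individual kinds for the
     // remaining ops.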
2969 void LIR_Assembler::membar() {
2970   COMMENT("membar");
2971   __ membar(MacroAssembler::AnyAny);
2972 }
2973 
2974 void LIR_Assembler::membar_acquire() {
2975   __ membar(Assembler::LoadLoad|Assembler::LoadStore);
2976 }
2977 
2978 void LIR_Assembler::membar_release() {
2979   __ membar(Assembler::LoadStore|Assembler::StoreStore);
2980 }
2981 
2982 void LIR_Assembler::membar_loadload() {
2983   __ membar(Assembler::LoadLoad);
2984 }
2985 
2986 void LIR_Assembler::membar_storestore() {
2987   __ membar(MacroAssembler::StoreStore);
2988 }
2989 
2990 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
2991 
2992 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
2993 
2994 void LIR_Assembler::on_spin_wait() {
2995   __ spin_wait();
2996 }
2997 
2998 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
2999   __ mov(result_reg->as_register(), rthread);
3000 }
3001 
3002 
3003 void LIR_Assembler::peephole(LIR_List *lir) {
3004 #if 0
3005   if (tableswitch_count >= max_tableswitches)
3006     return;
3007 
3008   /*
3009     This finite-state automaton recognizes sequences of compare-and-
3010     branch instructions.  We will turn them into a tableswitch.  You
3011     could argue that C1 really shouldn't be doing this sort of
3012     optimization, but without it the code is really horrible.
3013   */
3014 
3015   enum { start_s, cmp1_s, beq_s, cmp_s } state;
3016   int first_key, last_key = -2147483648;
3017   int next_key = 0;
3018   int start_insn = -1;
3019   int last_insn = -1;
3020   Register reg = noreg;
3021   LIR_Opr reg_opr;
3022   state = start_s;
3023 
3024   LIR_OpList* inst = lir->instructions_list();
3025   for (int i = 0; i < inst->length(); i++) {
3026     LIR_Op* op = inst->at(i);
3027     switch (state) {
3028     case start_s:
3029       first_key = -1;
3030       start_insn = i;
3031       switch (op->code()) {
3032       case lir_cmp:
3033         LIR_Opr opr1 = op->as_Op2()->in_opr1();
3034         LIR_Opr opr2 = op->as_Op2()->in_opr2();
3035         if (opr1->is_cpu_register() && opr1->is_single_cpu()
3036             && opr2->is_constant()
3037             && opr2->type() == T_INT) {
3038           reg_opr = opr1;
3039           reg = opr1->as_register();
3040           first_key = opr2->as_constant_ptr()->as_jint();
3041           next_key = first_key + 1;
3042           state = cmp_s;
3043           goto next_state;
3044         }
3045         break;
3046       }
3047       break;
3048     case cmp_s:
3049       switch (op->code()) {
3050       case lir_branch:
3051         if (op->as_OpBranch()->cond() == lir_cond_equal) {
3052           state = beq_s;
3053           last_insn = i;
3054           goto next_state;
3055         }
3056       }
3057       state = start_s;
3058       break;
3059     case beq_s:
3060       switch (op->code()) {
3061       case lir_cmp: {
3062         LIR_Opr opr1 = op->as_Op2()->in_opr1();
3063         LIR_Opr opr2 = op->as_Op2()->in_opr2();
3064         if (opr1->is_cpu_register() && opr1->is_single_cpu()
3065             && opr1->as_register() == reg
3066             && opr2->is_constant()
3067             && opr2->type() == T_INT
3068             && opr2->as_constant_ptr()->as_jint() == next_key) {
3069           last_key = next_key;
3070           next_key++;
3071           state = cmp_s;
3072           goto next_state;
3073         }
3074       }
3075       }
3076       last_key = next_key;
3077       state = start_s;
3078       break;
3079     default:
3080       assert(false, "impossible state");
3081     }
3082     if (state == start_s) {
3083       if (first_key < last_key - 5L && reg != noreg) {
3084         {
3085           // printf("found run register %d starting at insn %d low value %d high value %d\n",
3086           //        reg->encoding(),
3087           //        start_insn, first_key, last_key);
3088           //   for (int i = 0; i < inst->length(); i++) {
3089           //     inst->at(i)->print();
3090           //     tty->print("\n");
3091           //   }
3092           //   tty->print("\n");
3093         }
3094 
3095         struct tableswitch *sw = &switches[tableswitch_count];
3096         sw->_insn_index = start_insn, sw->_first_key = first_key,
3097           sw->_last_key = last_key, sw->_reg = reg;
3098         inst->insert_before(last_insn + 1, new LIR_OpLabel(&sw->_after));
3099         {
3100           // Insert the new table of branches
3101           int offset = last_insn;
3102           for (int n = first_key; n < last_key; n++) {
3103             inst->insert_before
3104               (last_insn + 1,
3105                new LIR_OpBranch(lir_cond_always, T_ILLEGAL,
3106                                 inst->at(offset)->as_OpBranch()->label()));
3107             offset -= 2, i++;
3108           }
3109         }
3110         // Delete all the old compare-and-branch instructions
3111         for (int n = first_key; n < last_key; n++) {
3112           inst->remove_at(start_insn);
3113           inst->remove_at(start_insn);
3114         }
3115         // Insert the tableswitch instruction
3116         inst->insert_before(start_insn,
3117                             new LIR_Op2(lir_cmp, lir_cond_always,
3118                                         LIR_OprFact::intConst(tableswitch_count),
3119                                         reg_opr));
3120         inst->insert_before(start_insn + 1, new LIR_OpLabel(&sw->_branches));
3121         tableswitch_count++;
3122       }
3123       reg = noreg;
3124       last_key = -2147483648;
3125     }
3126   next_state:
3127     ;
3128   }
3129 #endif
3130 }
3131 
3132 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
3133   Address addr = as_Address(src->as_address_ptr());
3134   BasicType type = src->type();
3135   bool is_oop = is_reference_type(type);
3136 
3137   void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
3138   void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);
3139 
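       // Select word (32-bit) or doubleword helpers: word for T_INT and for
       // compressed oops, doubleword for T_LONG and uncompressed oops.  The
       // "al" variants provide acquire/release ordering.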
3140   switch(type) {
3141   case T_INT:
3142     xchg = &MacroAssembler::atomic_xchgalw;
3143     add = &MacroAssembler::atomic_addalw;
3144     break;
3145   case T_LONG:
3146     xchg = &MacroAssembler::atomic_xchgal;
3147     add = &MacroAssembler::atomic_addal;
3148     break;
3149   case T_OBJECT:
3150   case T_ARRAY:
3151     if (UseCompressedOops) {
3152       xchg = &MacroAssembler::atomic_xchgalw;
3153       add = &MacroAssembler::atomic_addalw;
3154     } else {
3155       xchg = &MacroAssembler::atomic_xchgal;
3156       add = &MacroAssembler::atomic_addal;
3157     }
3158     break;
3159   default:
3160     ShouldNotReachHere();
3161     xchg = &MacroAssembler::atomic_xchgal;
3162     add = &MacroAssembler::atomic_addal; // unreachable
3163   }
3164 
3165   switch (code) {
3166   case lir_xadd:
3167     {
3168       RegisterOrConstant inc;
3169       Register tmp = as_reg(tmp_op);
3170       Register dst = as_reg(dest);
3171       if (data->is_constant()) {
3172         inc = RegisterOrConstant(as_long(data));
3173         assert_different_registers(dst, addr.base(), tmp,
3174                                    rscratch1, rscratch2);
3175       } else {
3176         inc = RegisterOrConstant(as_reg(data));
3177         assert_different_registers(inc.as_register(), dst, addr.base(), tmp,
3178                                    rscratch1, rscratch2);
3179       }
3180       __ lea(tmp, addr);
3181       (_masm->*add)(dst, inc, tmp);
3182       break;
3183     }
3184   case lir_xchg:
3185     {
3186       Register tmp = tmp_op->as_register();
3187       Register obj = as_reg(data);
3188       Register dst = as_reg(dest);
3189       if (is_oop && UseCompressedOops) {
3190         __ encode_heap_oop(rscratch2, obj);
3191         obj = rscratch2;
3192       }
3193       assert_different_registers(obj, addr.base(), tmp, rscratch1, dst);
3194       __ lea(tmp, addr);
3195       (_masm->*xchg)(dst, obj, tmp);
3196       if (is_oop && UseCompressedOops) {
3197         __ decode_heap_oop(dst);
3198       }
3199     }
3200     break;
3201   default:
3202     ShouldNotReachHere();
3203   }
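       // A trailing full barrier orders the atomic update against all surrounding
       // memory accesses, beyond the acquire/release ordering of the helpers.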
3204   __ membar(__ AnyAny);
3205 }
3206 
3207 #undef __