/*
 * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "code/compiledIC.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_aarch64.inline.hpp"


#ifndef PRODUCT
#define COMMENT(x)   do { __ block_comment(x); } while (0)
#else
#define COMMENT(x)
#endif

NEEDS_CLEANUP // remove these definitions?
const Register IC_Klass    = rscratch2;   // where the IC klass is cached
const Register SYNC_header = r0;   // synchronization header
const Register SHIFT_count = r0;   // where count for shift operations must be

#define __ _masm->


static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2);
}



static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2,
                                       Register &tmp3) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp2 = extra;
  } else if (tmp3 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp3 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2, tmp3);
}


bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}

//--------------fpu register translations-----------------------


address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}


address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

address LIR_Assembler::int_constant(jlong n) {
  address const_addr = __ long_constant(n);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

void LIR_Assembler::breakpoint() { Unimplemented(); }

void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }

void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }

bool LIR_Assembler::is_literal_address(LIR_Address* addr) { Unimplemented(); return false; }
//-------------------------------------------

static Register as_reg(LIR_Opr op) {
  return op->is_double_cpu() ? op->as_register_lo() : op->as_register();
}

static jlong as_long(LIR_Opr data) {
  jlong result;
  switch (data->type()) {
  case T_INT:
    result = (data->as_jint());
    break;
  case T_LONG:
    result = (data->as_jlong());
    break;
  default:
    ShouldNotReachHere();
    result = 0;  // unreachable
  }
  return result;
}

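// Convert a LIR_Address into an AArch64 Address. A register index uses the
// scaled register-offset form (a 32-bit index is sign-extended with sxtw);
// a constant displacement uses an immediate offset when it is encodable,
// otherwise the displacement is first materialized into tmp. For example,
// an int-indexed word access with scale 2 becomes [base, windex, sxtw #2].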
Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  Register base = addr->base()->as_pointer_register();
  LIR_Opr opr = addr->index();
  if (opr->is_cpu_register()) {
    Register index;
    if (opr->is_single_cpu())
      index = opr->as_register();
    else
      index = opr->as_register_lo();
    assert(addr->disp() == 0, "must be");
    switch(opr->type()) {
      case T_INT:
        return Address(base, index, Address::sxtw(addr->scale()));
      case T_LONG:
        return Address(base, index, Address::lsl(addr->scale()));
      default:
        ShouldNotReachHere();
      }
  } else {
    intptr_t addr_offset = intptr_t(addr->disp());
    if (Address::offset_ok_for_immed(addr_offset, addr->scale()))
      return Address(base, addr_offset, Address::lsl(addr->scale()));
    else {
      __ mov(tmp, addr_offset);
      return Address(base, tmp, Address::lsl(addr->scale()));
    }
  }
  return Address();
}

Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  ShouldNotReachHere();
  return Address();
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}

Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr, rscratch1);  // Ouch
  // FIXME: This needs to be much more clever.  See x86.
}

// Ensure a valid Address (base + offset) to a stack-slot. If stack access is
// not encodable as a base + (immediate) offset, generate an explicit address
// calculation to hold the address in a temporary register.
Address LIR_Assembler::stack_slot_address(int index, uint size, Register tmp, int adjust) {
  precond(size == 4 || size == 8);
  Address addr = frame_map()->address_for_slot(index, adjust);
  precond(addr.getMode() == Address::base_plus_offset);
  precond(addr.base() == sp);
  precond(addr.offset() > 0);
  uint mask = size - 1;
  assert((addr.offset() & mask) == 0, "scaled offsets only");
  return __ legitimize_address(addr, size, tmp);
}

void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  // r2: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame, so the first slot
  // in the locals array of the osr buffer is the last local from the
  // interpreter and the last slot is local[0] (receiver) from the
  // interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, and the nth lock slot in the osr buffer is the
  // 0th lock in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  //   r2: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
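    // For example, with max_locals == 2 and number_of_locks == 1 the single
    // BasicObjectLock starts at byte offset 16 in the buffer: the lock word
    // is at offset 16 and its oop at offset 24.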
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord));
        __ cbnz(rscratch1, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif
      __ ldr(r19, Address(OSR_buf, slot_offset + 0));
      __ str(r19, frame_map()->address_for_monitor_lock(i));
      __ ldr(r19, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ str(r19, frame_map()->address_for_monitor_object(i));
    }
  }
}


// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
  Register receiver = FrameMap::receiver_opr->as_register();
  Register ic_klass = IC_Klass;
  int start_offset = __ offset();
  __ inline_cache_check(receiver, ic_klass);

  // if icache check fails, then jump to runtime routine
  // Note: RECEIVER must still contain the receiver!
  Label dont;
  __ br(Assembler::EQ, dont);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  // We align the verified entry point unless the method body
  // (including its inline cache check) will fit in a single 64-byte
  // icache line.
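  // (4 * 4 below is four 4-byte AArch64 instructions.)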
  if (! method()->is_accessor() || __ offset() - start_offset > 4 * 4) {
    // force alignment after the cache check.
    __ align(CodeEntryAlignment);
  }

  __ bind(dont);
  return start_offset;
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  Label L_skip_barrier;

  __ mov_metadata(rscratch2, method->holder()->constant_encoding());
  __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier /*L_fast_path*/);
  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
  __ bind(L_skip_barrier);
}

void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ mov(reg, zr);
  } else {
    __ movoop(reg, o, /*immediate*/true);
  }
}

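// C1 on AArch64 does not patch field/klass/mirror accesses in place.
// A site that would need patching instead calls the matching Runtime1
// patching entry, which is expected to deoptimize the frame so the
// interpreter re-executes the (now resolved) bytecode.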
void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;

  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
    reloc_type = relocInfo::section_word_type;
    break;
  case PatchingStub::load_klass_id:
    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
    reloc_type = relocInfo::metadata_type;
    break;
  case PatchingStub::load_mirror_id:
    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  case PatchingStub::load_appendix_id:
    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  default: ShouldNotReachHere();
  }

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  deoptimize_trap(info);
}


// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!

  return in_bytes(frame_map()->framesize_in_bytes());
}


int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in r0 and r3
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(r0);

  // search an exception handler (r0: exception oop, r3: throwing pc)
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(r0);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(r19, r0);  // Preserve the exception
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::r0_opr);
    stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
    __ unlock_object(r5, r4, r0, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ mov(c_rarg0, rthread);
    __ mov_metadata(c_rarg1, method()->constant_encoding());
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(r0, r19);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ block_comment("remove_frame and dispatch to the unwind handler");
  __ remove_frame(initial_frame_size_in_bytes());
  __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for deopt handler
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

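  // Materialize the address of this handler into lr: the deopt blob
  // uses lr as the return address identifying the deopt site.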
  __ adr(lr, pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
  _masm->code_section()->relocate(adr, relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}

void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  code_stub->set_safepoint_offset(__ offset());
  __ relocate(relocInfo::poll_return_type);
  __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
  __ ret(lr);
}

int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  guarantee(info != NULL, "Shouldn't be NULL");
  __ get_polling_page(rscratch1, relocInfo::poll_type);
  add_debug_info_for_branch(info);  // This isn't just debug info:
                                    // it's the oop map
  __ read_polling_page(rscratch1, relocInfo::poll_type);
  return __ offset();
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg == r31_sp)
    from_reg = sp;
  if (to_reg == r31_sp)
    to_reg = sp;
  __ mov(to_reg, from_reg);
}

void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movw(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
      break;
    }

    case T_OBJECT: {
        if (patch_code == lir_patch_none) {
          jobject2reg(c->as_jobject(), dest->as_register());
        } else {
          jobject2reg_with_patching(dest->as_register(), info);
        }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
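      // fmov accepts only a small (8-bit-encodable) class of FP
      // immediates; other constants are loaded from the constant pool.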
      if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
        __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
      } else {
        __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
        __ ldrs(dest->as_float_reg(), Address(rscratch1));
      }
      break;
    }

    case T_DOUBLE: {
      if (__ operand_valid_for_float_immediate(c->as_jdouble())) {
        __ fmovd(dest->as_double_reg(), (c->as_jdouble()));
      } else {
        __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
        __ ldrd(dest->as_double_reg(), Address(rscratch1));
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
  case T_OBJECT:
    {
      if (! c->as_jobject())
        __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
      else {
        const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
        reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
      }
    }
    break;
  case T_ADDRESS:
    {
      const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
      reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
    }
    break;
  case T_INT:
  case T_FLOAT:
    {
      Register reg = zr;
      if (c->as_jint_bits() == 0)
        __ strw(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
      else {
        __ movw(rscratch1, c->as_jint_bits());
        __ strw(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix()));
      }
    }
    break;
  case T_LONG:
  case T_DOUBLE:
    {
      Register reg = zr;
      if (c->as_jlong_bits() == 0)
        __ str(zr, frame_map()->address_for_slot(dest->double_stack_ix(),
                                                 lo_word_offset_in_bytes));
      else {
        __ mov(rscratch1, (intptr_t)c->as_jlong_bits());
        __ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(),
                                                        lo_word_offset_in_bytes));
      }
    }
    break;
  default:
    ShouldNotReachHere();
  }
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* to_addr = dest->as_address_ptr();

  void (Assembler::* insn)(Register Rt, const Address &adr);

  switch (type) {
  case T_ADDRESS:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_LONG:
    assert(c->as_jlong() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_INT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strw;
    break;
  case T_OBJECT:
  case T_ARRAY:
    assert(c->as_jobject() == 0, "should be");
    if (UseCompressedOops && !wide) {
      insn = &Assembler::strw;
    } else {
      insn = &Assembler::str;
    }
    break;
  case T_CHAR:
  case T_SHORT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strh;
    break;
  case T_BOOLEAN:
  case T_BYTE:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strb;
    break;
  default:
    ShouldNotReachHere();
    insn = &Assembler::str;  // unreachable
  }

  if (info) add_debug_info_for_null_check_here(info);
  (_masm->*insn)(zr, as_Address(to_addr, rscratch1));
}

void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
    if (is_reference_type(src->type())) {
      // Surprising to me but we can see move of a long to t_object
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);

  } else if (dest->is_single_fpu()) {
    __ fmovs(dest->as_float_reg(), src->as_float_reg());

  } else if (dest->is_double_fpu()) {
    __ fmovd(dest->as_double_reg(), src->as_double_reg());

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  precond(src->is_register() && dest->is_stack());

  uint const c_sz32 = sizeof(uint32_t);
  uint const c_sz64 = sizeof(uint64_t);

  if (src->is_single_cpu()) {
    int index = dest->single_stack_ix();
    if (is_reference_type(type)) {
      __ str(src->as_register(), stack_slot_address(index, c_sz64, rscratch1));
      __ verify_oop(src->as_register());
    } else if (type == T_METADATA || type == T_DOUBLE || type == T_ADDRESS) {
      __ str(src->as_register(), stack_slot_address(index, c_sz64, rscratch1));
    } else {
      __ strw(src->as_register(), stack_slot_address(index, c_sz32, rscratch1));
    }

  } else if (src->is_double_cpu()) {
    int index = dest->double_stack_ix();
    Address dest_addr_LO = stack_slot_address(index, c_sz64, rscratch1, lo_word_offset_in_bytes);
    __ str(src->as_register_lo(), dest_addr_LO);

  } else if (src->is_single_fpu()) {
    int index = dest->single_stack_ix();
    __ strs(src->as_float_reg(), stack_slot_address(index, c_sz32, rscratch1));

  } else if (src->is_double_fpu()) {
    int index = dest->double_stack_ix();
    __ strd(src->as_double_reg(), stack_slot_address(index, c_sz64, rscratch1));

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = NULL;
  Register compressed_src = rscratch1;

  if (patch_code != lir_patch_none) {
    deoptimize_trap(info);
    return;
  }

  if (is_reference_type(type)) {
    __ verify_oop(src->as_register());

    if (UseCompressedOops && !wide) {
      __ encode_heap_oop(compressed_src, src->as_register());
    } else {
      compressed_src = src->as_register();
    }
  }

  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      __ strs(src->as_float_reg(), as_Address(to_addr));
      break;
    }

    case T_DOUBLE: {
      __ strd(src->as_double_reg(), as_Address(to_addr));
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ strw(compressed_src, as_Address(to_addr, rscratch2));
      } else {
         __ str(compressed_src, as_Address(to_addr));
      }
      break;
    case T_METADATA:
      // We get here to store a method pointer to the stack to pass to
      // a dtrace runtime call. This can't work on 64 bit with
      // compressed klass ptrs: T_METADATA can be a compressed klass
      // ptr or a 64 bit method pointer.
      ShouldNotReachHere();
      __ str(src->as_register(), as_Address(to_addr));
      break;
    case T_ADDRESS:
      __ str(src->as_register(), as_Address(to_addr));
      break;
    case T_INT:
      __ strw(src->as_register(), as_Address(to_addr));
      break;

    case T_LONG: {
      __ str(src->as_register_lo(), as_Address_lo(to_addr));
      break;
    }

    case T_BYTE:    // fall through
    case T_BOOLEAN: {
      __ strb(src->as_register(), as_Address(to_addr));
      break;
    }

    case T_CHAR:    // fall through
    case T_SHORT:
      __ strh(src->as_register(), as_Address(to_addr));
      break;

    default:
      ShouldNotReachHere();
  }
  if (info != NULL) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  precond(src->is_stack() && dest->is_register());

  uint const c_sz32 = sizeof(uint32_t);
  uint const c_sz64 = sizeof(uint64_t);

  if (dest->is_single_cpu()) {
    int index = src->single_stack_ix();
    if (is_reference_type(type)) {
      __ ldr(dest->as_register(), stack_slot_address(index, c_sz64, rscratch1));
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ ldr(dest->as_register(), stack_slot_address(index, c_sz64, rscratch1));
    } else {
      __ ldrw(dest->as_register(), stack_slot_address(index, c_sz32, rscratch1));
    }

  } else if (dest->is_double_cpu()) {
    int index = src->double_stack_ix();
    Address src_addr_LO = stack_slot_address(index, c_sz64, rscratch1, lo_word_offset_in_bytes);
    __ ldr(dest->as_register_lo(), src_addr_LO);

  } else if (dest->is_single_fpu()) {
    int index = src->single_stack_ix();
    __ ldrs(dest->as_float_reg(), stack_slot_address(index, c_sz32, rscratch1));

  } else if (dest->is_double_fpu()) {
    int index = src->double_stack_ix();
    __ ldrd(dest->as_double_reg(), stack_slot_address(index, c_sz64, rscratch1));

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;

  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
    reloc_type = relocInfo::section_word_type;
    break;
  case PatchingStub::load_klass_id:
    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
    reloc_type = relocInfo::metadata_type;
    break;
  case PatchingStub::load_mirror_id:
    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  case PatchingStub::load_appendix_id:
    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  default: ShouldNotReachHere();
  }

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}

void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {

  LIR_Opr temp;
  if (type == T_LONG || type == T_DOUBLE)
    temp = FrameMap::rscratch1_long_opr;
  else
    temp = FrameMap::rscratch1_opr;

  stack2reg(src, temp, src->type());
  reg2stack(temp, dest, dest->type(), false);
}


void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
  LIR_Address* addr = src->as_address_ptr();
  LIR_Address* from_addr = src->as_address_ptr();

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  if (patch_code != lir_patch_none) {
    deoptimize_trap(info);
    return;
  }

  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }
  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      __ ldrs(dest->as_float_reg(), as_Address(from_addr));
      break;
    }

    case T_DOUBLE: {
      __ ldrd(dest->as_double_reg(), as_Address(from_addr));
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ ldrw(dest->as_register(), as_Address(from_addr));
      } else {
         __ ldr(dest->as_register(), as_Address(from_addr));
      }
      break;
    case T_METADATA:
      // As in reg2mem: T_METADATA can be a compressed klass ptr or a
      // 64 bit method pointer, so on 64 bit with compressed klass ptrs
      // the access size is ambiguous and this load is never generated.
      ShouldNotReachHere();
      __ ldr(dest->as_register(), as_Address(from_addr));
      break;
    case T_ADDRESS:
      __ ldr(dest->as_register(), as_Address(from_addr));
      break;
    case T_INT:
      __ ldrw(dest->as_register(), as_Address(from_addr));
      break;

    case T_LONG: {
      __ ldr(dest->as_register_lo(), as_Address_lo(from_addr));
      break;
    }

    case T_BYTE:
      __ ldrsb(dest->as_register(), as_Address(from_addr));
      break;
    case T_BOOLEAN: {
      __ ldrb(dest->as_register(), as_Address(from_addr));
      break;
    }

    case T_CHAR:
      __ ldrh(dest->as_register(), as_Address(from_addr));
      break;
    case T_SHORT:
      __ ldrsh(dest->as_register(), as_Address(from_addr));
      break;

    default:
      ShouldNotReachHere();
  }

  if (is_reference_type(type)) {
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }

    if (!UseZGC) {
      // Load barrier has not yet been applied, so ZGC can't verify the oop here
      __ verify_oop(dest->as_register());
    }
  }
}


int LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
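  // e.g. T_INT elements are 4 bytes, so this returns 2, the shift
  // amount used for scaled array addressing.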
  return exact_log2(elem_size);
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
  case lir_idiv:
  case lir_irem:
    arithmetic_idiv(op->code(),
                    op->in_opr1(),
                    op->in_opr2(),
                    op->in_opr3(),
                    op->result_opr(),
                    op->info());
    break;
  case lir_fmad:
    __ fmaddd(op->result_opr()->as_double_reg(),
              op->in_opr1()->as_double_reg(),
              op->in_opr2()->as_double_reg(),
              op->in_opr3()->as_double_reg());
    break;
  case lir_fmaf:
    __ fmadds(op->result_opr()->as_float_reg(),
              op->in_opr1()->as_float_reg(),
              op->in_opr2()->as_float_reg(),
              op->in_opr3()->as_float_reg());
    break;
  default:      ShouldNotReachHere(); break;
  }
}

void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != NULL) add_debug_info_for_branch(op->info());
    __ b(*(op->label()));
  } else {
    Assembler::Condition acond;
    if (op->code() == lir_cond_float_branch) {
      bool is_unordered = (op->ublock() == op->block());
      // Assembler::EQ does not permit unordered branches, so we add
      // another branch here.  Likewise, Assembler::NE does not permit
      // ordered branches.
      if ((is_unordered && op->cond() == lir_cond_equal)
          || (!is_unordered && op->cond() == lir_cond_notEqual))
        __ br(Assembler::VS, *(op->ublock()->label()));
      switch(op->cond()) {
      case lir_cond_equal:        acond = Assembler::EQ; break;
      case lir_cond_notEqual:     acond = Assembler::NE; break;
      case lir_cond_less:         acond = (is_unordered ? Assembler::LT : Assembler::LO); break;
      case lir_cond_lessEqual:    acond = (is_unordered ? Assembler::LE : Assembler::LS); break;
      case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break;
      case lir_cond_greater:      acond = (is_unordered ? Assembler::HI : Assembler::GT); break;
      default:                    ShouldNotReachHere();
        acond = Assembler::EQ;  // unreachable
      }
    } else {
      switch (op->cond()) {
        case lir_cond_equal:        acond = Assembler::EQ; break;
        case lir_cond_notEqual:     acond = Assembler::NE; break;
        case lir_cond_less:         acond = Assembler::LT; break;
        case lir_cond_lessEqual:    acond = Assembler::LE; break;
        case lir_cond_greaterEqual: acond = Assembler::GE; break;
        case lir_cond_greater:      acond = Assembler::GT; break;
        case lir_cond_belowEqual:   acond = Assembler::LS; break;
        case lir_cond_aboveEqual:   acond = Assembler::HS; break;
        default:                    ShouldNotReachHere();
          acond = Assembler::EQ;  // unreachable
      }
    }
    __ br(acond, *(op->label()));
  }
}



void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2f:
      {
        __ scvtfws(dest->as_float_reg(), src->as_register());
        break;
      }
    case Bytecodes::_i2d:
      {
        __ scvtfwd(dest->as_double_reg(), src->as_register());
        break;
      }
    case Bytecodes::_l2d:
      {
        __ scvtfd(dest->as_double_reg(), src->as_register_lo());
        break;
      }
    case Bytecodes::_l2f:
      {
        __ scvtfs(dest->as_float_reg(), src->as_register_lo());
        break;
      }
    case Bytecodes::_f2d:
      {
        __ fcvts(dest->as_double_reg(), src->as_float_reg());
        break;
      }
    case Bytecodes::_d2f:
      {
        __ fcvtd(dest->as_float_reg(), src->as_double_reg());
        break;
      }
    case Bytecodes::_i2c:
      {
        __ ubfx(dest->as_register(), src->as_register(), 0, 16);
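        // ubfx with lsb 0 and width 16 zero-extends the low halfword:
        // Java char is an unsigned 16-bit type.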
        break;
      }
    case Bytecodes::_i2l:
      {
        __ sxtw(dest->as_register_lo(), src->as_register());
        break;
      }
    case Bytecodes::_i2s:
      {
        __ sxth(dest->as_register(), src->as_register());
        break;
      }
    case Bytecodes::_i2b:
      {
        __ sxtb(dest->as_register(), src->as_register());
        break;
      }
    case Bytecodes::_l2i:
      {
        _masm->block_comment("FIXME: This could be a no-op");
        __ uxtw(dest->as_register(), src->as_register_lo());
        break;
      }
    case Bytecodes::_d2l:
      {
        __ fcvtzd(dest->as_register_lo(), src->as_double_reg());
        break;
      }
    case Bytecodes::_f2i:
      {
        __ fcvtzsw(dest->as_register(), src->as_float_reg());
        break;
      }
    case Bytecodes::_f2l:
      {
        __ fcvtzs(dest->as_register_lo(), src->as_float_reg());
        break;
      }
    case Bytecodes::_d2i:
      {
        __ fcvtzdw(dest->as_register(), src->as_double_reg());
        break;
      }
    default: ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    __ ldrb(rscratch1, Address(op->klass()->as_register(),
                               InstanceKlass::init_state_offset()));
    __ cmpw(rscratch1, InstanceKlass::fully_initialized);
    add_debug_info_for_null_check_here(op->stub()->info());
    __ br(Assembler::NE, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();
  __ uxtw(len, len);

  if (UseSlowPath ||
      (!UseFastNewObjectArray && is_reference_type(op->type())) ||
      (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
    __ b(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::header_size(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

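// Update the receiver-type profile at a call or type-check site. Each
// ReceiverTypeData row is a (receiver klass, count) pair: the first loop
// increments the count of the row already holding this klass, the second
// claims the first empty row for it. If every row is taken, the update
// is simply dropped.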
void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ ldr(rscratch1, Address(rscratch2));
    __ cmp(recv, rscratch1);
    __ br(Assembler::NE, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ b(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    __ lea(rscratch2,
           Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    Address recv_addr(rscratch2);
    __ ldr(rscratch1, recv_addr);
    __ cbnz(rscratch1, next_test);
    __ str(recv, recv_addr);
    __ mov(rscratch1, DataLayout::counter_increment);
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
    __ str(rscratch1, Address(rscratch2));
    __ b(*update_done);
    __ bind(next_test);
  }
}

void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;

  // check if it needs to be profiled
  ciMethodData* md;
  ciProfileData* data;

  const bool should_profile = op->should_profile();

  if (should_profile) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != NULL,                "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  Label profile_cast_success, profile_cast_failure;
  Label *success_target = should_profile ? &profile_cast_success : success;
  Label *failure_target = should_profile ? &profile_cast_failure : failure;

  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedClassPointers) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  if (should_profile) {
    Label not_null;
    __ cbnz(obj, not_null);
    // Object is null; update MDO and exit
    Register mdo  = klass_RInfo;
    __ mov_metadata(mdo, md->constant_encoding());
    Address data_addr
      = __ form_address(rscratch2, mdo,
                        md->byte_offset_of_slot(data, DataLayout::flags_offset()),
                        0);
    __ ldrb(rscratch1, data_addr);
    __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
    __ strb(rscratch1, data_addr);
    __ b(*obj_is_null);
    __ bind(not_null);
  } else {
    __ cbz(obj, *obj_is_null);
  }

  if (!k->is_loaded()) {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
    __ mov_metadata(k_RInfo, k->constant_encoding());
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(rscratch1, obj);
    __ cmp(rscratch1, k_RInfo);

    __ br(Assembler::NE, *failure_target);
    // successful cast, fall through to profile or jump
  } else {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(klass_RInfo, obj);
    if (k->is_loaded()) {
      // See if we get an immediate positive hit
      __ ldr(rscratch1, Address(klass_RInfo, int64_t(k->super_check_offset())));
      __ cmp(k_RInfo, rscratch1);
      if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
        __ br(Assembler::NE, *failure_target);
        // successful cast, fall through to profile or jump
      } else {
        // See if we get an immediate positive hit
        __ br(Assembler::EQ, *success_target);
        // check for self
        __ cmp(klass_RInfo, k_RInfo);
        __ br(Assembler::EQ, *success_target);

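        // Runtime1::slow_subtype_check_id takes its two klass arguments
        // on the stack and (as the reloads below assume) leaves a boolean
        // result in the first of the two stacked words.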
1380         __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1381         __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1382         __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1383         // result is a boolean
1384         __ cbzw(klass_RInfo, *failure_target);
1385         // successful cast, fall through to profile or jump
1386       }
1387     } else {
1388       // perform the fast part of the checking logic
1389       __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1390       // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1391       __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1392       __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1393       __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1394       // result is a boolean
1395       __ cbz(k_RInfo, *failure_target);
1396       // successful cast, fall through to profile or jump
1397     }
1398   }
1399   if (should_profile) {
1400     Register mdo  = klass_RInfo, recv = k_RInfo;
1401     __ bind(profile_cast_success);
1402     __ mov_metadata(mdo, md->constant_encoding());
1403     __ load_klass(recv, obj);
1404     Label update_done;
1405     type_profile_helper(mdo, md, data, recv, success);
1406     __ b(*success);
1407 
1408     __ bind(profile_cast_failure);
1409     __ mov_metadata(mdo, md->constant_encoding());
1410     Address counter_addr
1411       = __ form_address(rscratch2, mdo,
1412                         md->byte_offset_of_slot(data, CounterData::count_offset()),
1413                         0);
1414     __ ldr(rscratch1, counter_addr);
1415     __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
1416     __ str(rscratch1, counter_addr);
1417     __ b(*failure);
1418   }
1419   __ b(*success);
1420 }
1421 
1422 
1423 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
1424   const bool should_profile = op->should_profile();
1425 
1426   LIR_Code code = op->code();
1427   if (code == lir_store_check) {
1428     Register value = op->object()->as_register();
1429     Register array = op->array()->as_register();
1430     Register k_RInfo = op->tmp1()->as_register();
1431     Register klass_RInfo = op->tmp2()->as_register();
1432     Register Rtmp1 = op->tmp3()->as_register();
1433 
1434     CodeStub* stub = op->stub();
1435 
1436     // check if it needs to be profiled
1437     ciMethodData* md;
1438     ciProfileData* data;
1439 
1440     if (should_profile) {
1441       ciMethod* method = op->profiled_method();
1442       assert(method != NULL, "Should have method");
1443       int bci = op->profiled_bci();
1444       md = method->method_data_or_null();
1445       assert(md != NULL, "Sanity");
1446       data = md->bci_to_data(bci);
1447       assert(data != NULL,                "need data for type check");
1448       assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1449     }
1450     Label profile_cast_success, profile_cast_failure, done;
1451     Label *success_target = should_profile ? &profile_cast_success : &done;
1452     Label *failure_target = should_profile ? &profile_cast_failure : stub->entry();
1453 
1454     if (should_profile) {
1455       Label not_null;
1456       __ cbnz(value, not_null);
1457       // Object is null; update MDO and exit
1458       Register mdo  = klass_RInfo;
1459       __ mov_metadata(mdo, md->constant_encoding());
1460       Address data_addr
1461         = __ form_address(rscratch2, mdo,
1462                           md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1463                           0);
1464       __ ldrb(rscratch1, data_addr);
1465       __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1466       __ strb(rscratch1, data_addr);
1467       __ b(done);
1468       __ bind(not_null);
1469     } else {
1470       __ cbz(value, done);
1471     }
1472 
1473     add_debug_info_for_null_check_here(op->info_for_exception());
1474     __ load_klass(k_RInfo, array);
1475     __ load_klass(klass_RInfo, value);
1476 
1477     // get instance klass (it's already uncompressed)
1478     __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
1479     // perform the fast part of the checking logic
1480     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1481     // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1482     __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1483     __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1484     __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1485     // result is a boolean
1486     __ cbzw(k_RInfo, *failure_target);
1487     // fall through to the success case
1488 
1489     if (should_profile) {
1490       Register mdo  = klass_RInfo, recv = k_RInfo;
1491       __ bind(profile_cast_success);
1492       __ mov_metadata(mdo, md->constant_encoding());
1493       __ load_klass(recv, value);
1494       Label update_done;
1495       type_profile_helper(mdo, md, data, recv, &done);
1496       __ b(done);
1497 
1498       __ bind(profile_cast_failure);
1499       __ mov_metadata(mdo, md->constant_encoding());
1500       Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1501       __ lea(rscratch2, counter_addr);
1502       __ ldr(rscratch1, Address(rscratch2));
1503       __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
1504       __ str(rscratch1, Address(rscratch2));
1505       __ b(*stub->entry());
1506     }
1507 
1508     __ bind(done);
1509   } else if (code == lir_checkcast) {
1510     Register obj = op->object()->as_register();
1511     Register dst = op->result_opr()->as_register();
1512     Label success;
1513     emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
1514     __ bind(success);
1515     if (dst != obj) {
1516       __ mov(dst, obj);
1517     }
1518   } else if (code == lir_instanceof) {
1519     Register obj = op->object()->as_register();
1520     Register dst = op->result_opr()->as_register();
1521     Label success, failure, done;
1522     emit_typecheck_helper(op, &success, &failure, &failure);
1523     __ bind(failure);
1524     __ mov(dst, zr);
1525     __ b(done);
1526     __ bind(success);
1527     __ mov(dst, 1);
1528     __ bind(done);
1529   } else {
1530     ShouldNotReachHere();
1531   }
1532 }
1533 
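     // CAS helpers: cmpxchg leaves the condition flags set from the compare
     // (EQ on success), cset records failure (NE) in rscratch1, and the
     // trailing AnyAny barrier gives the exchange full-fence semantics.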
1534 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1535   __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1536   __ cset(rscratch1, Assembler::NE);
1537   __ membar(__ AnyAny);
1538 }
1539 
1540 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1541   __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1542   __ cset(rscratch1, Assembler::NE);
1543   __ membar(__ AnyAny);
1544 }
1545 
1546 
1547 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1548   assert(VM_Version::supports_cx8(), "wrong machine");
1549   Register addr;
1550   if (op->addr()->is_register()) {
1551     addr = as_reg(op->addr());
1552   } else {
1553     assert(op->addr()->is_address(), "what else?");
1554     LIR_Address* addr_ptr = op->addr()->as_address_ptr();
1555     assert(addr_ptr->disp() == 0, "need 0 disp");
1556     assert(addr_ptr->index() == LIR_Opr::illegalOpr(), "need 0 index");
1557     addr = as_reg(addr_ptr->base());
1558   }
1559   Register newval = as_reg(op->new_value());
1560   Register cmpval = as_reg(op->cmp_value());
1561 
1562   if (op->code() == lir_cas_obj) {
1563     if (UseCompressedOops) {
1564       assert(op->tmp1()->is_valid(), "must be");
1565       Register t1 = op->tmp1()->as_register();
1566       __ encode_heap_oop(t1, cmpval);
1567       cmpval = t1;
1568       __ encode_heap_oop(rscratch2, newval);
1569       newval = rscratch2;
1570       casw(addr, newval, cmpval);
1571     } else {
1572       casl(addr, newval, cmpval);
1573     }
1574   } else if (op->code() == lir_cas_int) {
1575     casw(addr, newval, cmpval);
1576   } else {
1577     casl(addr, newval, cmpval);
1578   }
1579 }
1580 
1581 
1582 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
1583 
1584   Assembler::Condition acond, ncond;
1585   switch (condition) {
1586   case lir_cond_equal:        acond = Assembler::EQ; ncond = Assembler::NE; break;
1587   case lir_cond_notEqual:     acond = Assembler::NE; ncond = Assembler::EQ; break;
1588   case lir_cond_less:         acond = Assembler::LT; ncond = Assembler::GE; break;
1589   case lir_cond_lessEqual:    acond = Assembler::LE; ncond = Assembler::GT; break;
1590   case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;
1591   case lir_cond_greater:      acond = Assembler::GT; ncond = Assembler::LE; break;
1592   case lir_cond_belowEqual:
1593   case lir_cond_aboveEqual:
1594   default:                    ShouldNotReachHere();
1595     acond = Assembler::EQ; ncond = Assembler::NE;  // unreachable
1596   }
1597 
1598   assert(result->is_single_cpu() || result->is_double_cpu(),
1599          "expect single register for result");
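       // A select between the constants 0 and 1 collapses to a single cset
       // on the condition (or on its negation when the constants are swapped).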
1600   if (opr1->is_constant() && opr2->is_constant()
1601       && opr1->type() == T_INT && opr2->type() == T_INT) {
1602     jint val1 = opr1->as_jint();
1603     jint val2 = opr2->as_jint();
1604     if (val1 == 0 && val2 == 1) {
1605       __ cset(result->as_register(), ncond);
1606       return;
1607     } else if (val1 == 1 && val2 == 0) {
1608       __ cset(result->as_register(), acond);
1609       return;
1610     }
1611   }
1612 
1613   if (opr1->is_constant() && opr2->is_constant()
1614       && opr1->type() == T_LONG && opr2->type() == T_LONG) {
1615     jlong val1 = opr1->as_jlong();
1616     jlong val2 = opr2->as_jlong();
1617     if (val1 == 0 && val2 == 1) {
1618       __ cset(result->as_register_lo(), ncond);
1619       return;
1620     } else if (val1 == 1 && val2 == 0) {
1621       __ cset(result->as_register_lo(), acond);
1622       return;
1623     }
1624   }
1625 
1626   if (opr1->is_stack()) {
1627     stack2reg(opr1, FrameMap::rscratch1_opr, result->type());
1628     opr1 = FrameMap::rscratch1_opr;
1629   } else if (opr1->is_constant()) {
1630     LIR_Opr tmp
1631       = opr1->type() == T_LONG ? FrameMap::rscratch1_long_opr : FrameMap::rscratch1_opr;
1632     const2reg(opr1, tmp, lir_patch_none, NULL);
1633     opr1 = tmp;
1634   }
1635 
1636   if (opr2->is_stack()) {
1637     stack2reg(opr2, FrameMap::rscratch2_opr, result->type());
1638     opr2 = FrameMap::rscratch2_opr;
1639   } else if (opr2->is_constant()) {
1640     LIR_Opr tmp
1641       = opr2->type() == T_LONG ? FrameMap::rscratch2_long_opr : FrameMap::rscratch2_opr;
1642     const2reg(opr2, tmp, lir_patch_none, NULL);
1643     opr2 = tmp;
1644   }
1645 
1646   if (result->type() == T_LONG)
1647     __ csel(result->as_register_lo(), opr1->as_register_lo(), opr2->as_register_lo(), acond);
1648   else
1649     __ csel(result->as_register(), opr1->as_register(), opr2->as_register(), acond);
1650 }
1651 
1652 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
1653   assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
1654 
1655   if (left->is_single_cpu()) {
1656     Register lreg = left->as_register();
1657     Register dreg = as_reg(dest);
1658 
1659     if (right->is_single_cpu()) {
1660       // cpu register - cpu register
1661 
1662       assert(left->type() == T_INT && right->type() == T_INT && dest->type() == T_INT,
1663              "should be");
1664       Register rreg = right->as_register();
1665       switch (code) {
1666       case lir_add: __ addw (dest->as_register(), lreg, rreg); break;
1667       case lir_sub: __ subw (dest->as_register(), lreg, rreg); break;
1668       case lir_mul: __ mulw (dest->as_register(), lreg, rreg); break;
1669       default:      ShouldNotReachHere();
1670       }
1671 
1672     } else if (right->is_double_cpu()) {
1673       Register rreg = right->as_register_lo();
1674       // single_cpu + double_cpu: can happen with obj+long
1675       assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
1676       switch (code) {
1677       case lir_add: __ add(dreg, lreg, rreg); break;
1678       case lir_sub: __ sub(dreg, lreg, rreg); break;
1679       default: ShouldNotReachHere();
1680       }
1681     } else if (right->is_constant()) {
1682       // cpu register - constant
1683       jlong c;
1684 
1685       // FIXME.  This is fugly: we really need to factor all this logic.
1686       switch(right->type()) {
1687       case T_LONG:
1688         c = right->as_constant_ptr()->as_jlong();
1689         break;
1690       case T_INT:
1691       case T_ADDRESS:
1692         c = right->as_constant_ptr()->as_jint();
1693         break;
1694       default:
1695         ShouldNotReachHere();
1696         c = 0;  // unreachable
1697         break;
1698       }
1699 
1700       assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
1701       if (c == 0 && dreg == lreg) {
1702         COMMENT("effective nop elided");
1703         return;
1704       }
1705       switch(left->type()) {
1706       case T_INT:
1707         switch (code) {
1708         case lir_add: __ addw(dreg, lreg, c); break;
1709         case lir_sub: __ subw(dreg, lreg, c); break;
1710         default: ShouldNotReachHere();
1711         }
1712         break;
1713       case T_OBJECT:
1714       case T_ADDRESS:
1715         switch (code) {
1716         case lir_add: __ add(dreg, lreg, c); break;
1717         case lir_sub: __ sub(dreg, lreg, c); break;
1718         default: ShouldNotReachHere();
1719         }
1720         break;
1721       default:
1722         ShouldNotReachHere();
1723       }
1724     } else {
1725       ShouldNotReachHere();
1726     }
1727 
1728   } else if (left->is_double_cpu()) {
1729     Register lreg_lo = left->as_register_lo();
1730 
1731     if (right->is_double_cpu()) {
1732       // cpu register - cpu register
1733       Register rreg_lo = right->as_register_lo();
1734       switch (code) {
1735       case lir_add: __ add (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1736       case lir_sub: __ sub (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1737       case lir_mul: __ mul (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1738       case lir_div: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, false, rscratch1); break;
1739       case lir_rem: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, true, rscratch1); break;
1740       default:
1741         ShouldNotReachHere();
1742       }
1743 
1744     } else if (right->is_constant()) {
1745       jlong c = right->as_constant_ptr()->as_jlong();
1746       Register dreg = as_reg(dest);
1747       switch (code) {
1748         case lir_add:
1749         case lir_sub:
1750           if (c == 0 && dreg == lreg_lo) {
1751             COMMENT("effective nop elided");
1752             return;
1753           }
1754           code == lir_add ? __ add(dreg, lreg_lo, c) : __ sub(dreg, lreg_lo, c);
1755           break;
1756         case lir_div:
1757           assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
1758           if (c == 1) {
1759             // move lreg_lo to dreg if divisor is 1
1760             __ mov(dreg, lreg_lo);
1761           } else {
1762             unsigned int shift = log2i_exact(c);
1763             // use rscratch1 as intermediate result register
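                 // signed division by 2^shift: asr extracts the sign mask,
                 // the add biases a negative dividend by (2^shift - 1), and
                 // the final asr then rounds toward zero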
1764             __ asr(rscratch1, lreg_lo, 63);
1765             __ add(rscratch1, lreg_lo, rscratch1, Assembler::LSR, 64 - shift);
1766             __ asr(dreg, rscratch1, shift);
1767           }
1768           break;
1769         case lir_rem:
1770           assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
1771           if (c == 1) {
1772             // move 0 to dreg if divisor is 1
1773             __ mov(dreg, zr);
1774           } else {
1775             // use rscratch1 as intermediate result register
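                 // signed remainder via masking: a positive dividend yields
                 // lreg_lo & (c-1), otherwise -((-lreg_lo) & (c-1)); negs
                 // sets MI exactly when lreg_lo is positive, steering csneg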
1776             __ negs(rscratch1, lreg_lo);
1777             __ andr(dreg, lreg_lo, c - 1);
1778             __ andr(rscratch1, rscratch1, c - 1);
1779             __ csneg(dreg, dreg, rscratch1, Assembler::MI);
1780           }
1781           break;
1782         default:
1783           ShouldNotReachHere();
1784       }
1785     } else {
1786       ShouldNotReachHere();
1787     }
1788   } else if (left->is_single_fpu()) {
1789     assert(right->is_single_fpu(), "right-hand side of float arithmetic must be a float register");
1790     switch (code) {
1791     case lir_add: __ fadds (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1792     case lir_sub: __ fsubs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1793     case lir_mul: __ fmuls (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1794     case lir_div: __ fdivs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1795     default:
1796       ShouldNotReachHere();
1797     }
1798   } else if (left->is_double_fpu()) {
1799     if (right->is_double_fpu()) {
1800       // fpu register - fpu register
1801       switch (code) {
1802       case lir_add: __ faddd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1803       case lir_sub: __ fsubd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1804       case lir_mul: __ fmuld (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1805       case lir_div: __ fdivd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1806       default:
1807         ShouldNotReachHere();
1808       }
1809     } else {
1810       // non-register right operands are not generated for double fpu ops
1811       ShouldNotReachHere();
1814     }
1815   } else if (left->is_single_stack() || left->is_address()) {
1816     assert(left == dest, "left and dest must be equal");
1817     ShouldNotReachHere();
1818   } else {
1819     ShouldNotReachHere();
1820   }
1821 }
1822 
1823 void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) { Unimplemented(); }
1824 
1825 
1826 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
1827   switch(code) {
1828   case lir_abs : __ fabsd(dest->as_double_reg(), value->as_double_reg()); break;
1829   case lir_sqrt: __ fsqrtd(dest->as_double_reg(), value->as_double_reg()); break;
1830   default      : ShouldNotReachHere();
1831   }
1832 }
1833 
1834 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1835 
1836   assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");
1837   Register Rleft = left->is_single_cpu() ? left->as_register() :
1838                                            left->as_register_lo();
1839    if (dst->is_single_cpu()) {
1840      Register Rdst = dst->as_register();
1841      if (right->is_constant()) {
1842        switch (code) {
1843          case lir_logic_and: __ andw (Rdst, Rleft, right->as_jint()); break;
1844          case lir_logic_or:  __ orrw (Rdst, Rleft, right->as_jint()); break;
1845          case lir_logic_xor: __ eorw (Rdst, Rleft, right->as_jint()); break;
1846          default: ShouldNotReachHere(); break;
1847        }
1848      } else {
1849        Register Rright = right->is_single_cpu() ? right->as_register() :
1850                                                   right->as_register_lo();
1851        switch (code) {
1852          case lir_logic_and: __ andw (Rdst, Rleft, Rright); break;
1853          case lir_logic_or:  __ orrw (Rdst, Rleft, Rright); break;
1854          case lir_logic_xor: __ eorw (Rdst, Rleft, Rright); break;
1855          default: ShouldNotReachHere(); break;
1856        }
1857      }
1858    } else {
1859      Register Rdst = dst->as_register_lo();
1860      if (right->is_constant()) {
1861        switch (code) {
1862          case lir_logic_and: __ andr (Rdst, Rleft, right->as_jlong()); break;
1863          case lir_logic_or:  __ orr (Rdst, Rleft, right->as_jlong()); break;
1864          case lir_logic_xor: __ eor (Rdst, Rleft, right->as_jlong()); break;
1865          default: ShouldNotReachHere(); break;
1866        }
1867      } else {
1868        Register Rright = right->is_single_cpu() ? right->as_register() :
1869                                                   right->as_register_lo();
1870        switch (code) {
1871          case lir_logic_and: __ andr (Rdst, Rleft, Rright); break;
1872          case lir_logic_or:  __ orr (Rdst, Rleft, Rright); break;
1873          case lir_logic_xor: __ eor (Rdst, Rleft, Rright); break;
1874          default: ShouldNotReachHere(); break;
1875        }
1876      }
1877    }
1878 }
1879 
1880 
1881 
1882 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr illegal, LIR_Opr result, CodeEmitInfo* info) {
1883 
1884   // opcode check
1885   assert((code == lir_idiv) || (code == lir_irem), "opcode must be idiv or irem");
1886   bool is_irem = (code == lir_irem);
1887 
1888   // operand check
1889   assert(left->is_single_cpu(),   "left must be register");
1890   assert(right->is_single_cpu() || right->is_constant(),  "right must be register or constant");
1891   assert(result->is_single_cpu(), "result must be register");
1892   Register lreg = left->as_register();
1893   Register dreg = result->as_register();
1894 
1895   // power-of-2 constant check and codegen
1896   if (right->is_constant()) {
1897     int c = right->as_constant_ptr()->as_jint();
1898     assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
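         // same power-of-two expansions as the 64-bit constant cases in
         // arith_op above, using the 32-bit (w-form) instructions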
1899     if (is_irem) {
1900       if (c == 1) {
1901         // move 0 to dreg if divisor is 1
1902         __ movw(dreg, zr);
1903       } else {
1904         // use rscratch1 as intermediate result register
1905         __ negsw(rscratch1, lreg);
1906         __ andw(dreg, lreg, c - 1);
1907         __ andw(rscratch1, rscratch1, c - 1);
1908         __ csnegw(dreg, dreg, rscratch1, Assembler::MI);
1909       }
1910     } else {
1911       if (c == 1) {
1912         // move lreg to dreg if divisor is 1
1913         __ movw(dreg, lreg);
1914       } else {
1915         unsigned int shift = exact_log2(c);
1916         // use rscratch1 as intermediate result register
1917         __ asrw(rscratch1, lreg, 31);
1918         __ addw(rscratch1, lreg, rscratch1, Assembler::LSR, 32 - shift);
1919         __ asrw(dreg, rscratch1, shift);
1920       }
1921     }
1922   } else {
1923     Register rreg = right->as_register();
1924     __ corrected_idivl(dreg, lreg, rreg, is_irem, rscratch1);
1925   }
1926 }
1927 
1928 
1929 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1930   if (opr1->is_constant() && opr2->is_single_cpu()) {
1931     // tableswitch
1932     Register reg = as_reg(opr2);
1933     struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
1934     __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
1935   } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
1936     Register reg1 = as_reg(opr1);
1937     if (opr2->is_single_cpu()) {
1938       // cpu register - cpu register
1939       Register reg2 = opr2->as_register();
1940       if (is_reference_type(opr1->type())) {
1941         __ cmpoop(reg1, reg2);
1942       } else {
1943         assert(!is_reference_type(opr2->type()), "cmp int, oop?");
1944         __ cmpw(reg1, reg2);
1945       }
1946       return;
1947     }
1948     if (opr2->is_double_cpu()) {
1949       // cpu register - cpu register
1950       Register reg2 = opr2->as_register_lo();
1951       __ cmp(reg1, reg2);
1952       return;
1953     }
1954 
1955     if (opr2->is_constant()) {
1956       bool is_32bit = false; // width of register operand
1957       jlong imm;
1958 
1959       switch(opr2->type()) {
1960       case T_INT:
1961         imm = opr2->as_constant_ptr()->as_jint();
1962         is_32bit = true;
1963         break;
1964       case T_LONG:
1965         imm = opr2->as_constant_ptr()->as_jlong();
1966         break;
1967       case T_ADDRESS:
1968         imm = opr2->as_constant_ptr()->as_jint();
1969         break;
1970       case T_METADATA:
1971         imm = (intptr_t)(opr2->as_constant_ptr()->as_metadata());
1972         break;
1973       case T_OBJECT:
1974       case T_ARRAY:
1975         jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
1976         __ cmpoop(reg1, rscratch1);
1977         return;
1978       default:
1979         ShouldNotReachHere();
1980         imm = 0;  // unreachable
1981         break;
1982       }
1983 
1984       if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
1985         if (is_32bit)
1986           __ cmpw(reg1, imm);
1987         else
1988           __ subs(zr, reg1, imm);
1989         return;
1990       } else {
1991         __ mov(rscratch1, imm);
1992         if (is_32bit)
1993           __ cmpw(reg1, rscratch1);
1994         else
1995           __ cmp(reg1, rscratch1);
1996         return;
1997       }
1998     } else
1999       ShouldNotReachHere();
2000   } else if (opr1->is_single_fpu()) {
2001     FloatRegister reg1 = opr1->as_float_reg();
2002     assert(opr2->is_single_fpu(), "expect single float register");
2003     FloatRegister reg2 = opr2->as_float_reg();
2004     __ fcmps(reg1, reg2);
2005   } else if (opr1->is_double_fpu()) {
2006     FloatRegister reg1 = opr1->as_double_reg();
2007     assert(opr2->is_double_fpu(), "expect double float register");
2008     FloatRegister reg2 = opr2->as_double_reg();
2009     __ fcmpd(reg1, reg2);
2010   } else {
2011     ShouldNotReachHere();
2012   }
2013 }
2014 
2015 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
2016   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2017     bool is_unordered_less = (code == lir_ucmp_fd2i);
2018     if (left->is_single_fpu()) {
2019       __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
2020     } else if (left->is_double_fpu()) {
2021       __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
2022     } else {
2023       ShouldNotReachHere();
2024     }
2025   } else if (code == lir_cmp_l2i) {
2026     Label done;
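         // produce -1/0/1: start with -1 and keep it when less; otherwise
         // csinc yields 0 when equal and zr + 1 == 1 when greater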
2027     __ cmp(left->as_register_lo(), right->as_register_lo());
2028     __ mov(dst->as_register(), (uint64_t)-1L);
2029     __ br(Assembler::LT, done);
2030     __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
2031     __ bind(done);
2032   } else {
2033     ShouldNotReachHere();
2034   }
2035 }
2036 
2037 
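     // On AArch64 every instruction is 4 bytes and call sites need no extra
     // alignment, so there is nothing to do here.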
2038 void LIR_Assembler::align_call(LIR_Code code) {  }
2039 
2040 
2041 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2042   address call = __ trampoline_call(Address(op->addr(), rtype));
2043   if (call == NULL) {
2044     bailout("trampoline stub overflow");
2045     return;
2046   }
2047   add_call_info(code_offset(), op->info());
2048 }
2049 
2050 
2051 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2052   address call = __ ic_call(op->addr());
2053   if (call == NULL) {
2054     bailout("trampoline stub overflow");
2055     return;
2056   }
2057   add_call_info(code_offset(), op->info());
2058 }
2059 
2060 void LIR_Assembler::emit_static_call_stub() {
2061   address call_pc = __ pc();
2062   address stub = __ start_a_stub(call_stub_size());
2063   if (stub == NULL) {
2064     bailout("static call stub overflow");
2065     return;
2066   }
2067 
2068   int start = __ offset();
2069 
2070   __ relocate(static_stub_Relocation::spec(call_pc));
2071   __ emit_static_call_stub();
2072 
2073   assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size()
2074         <= call_stub_size(), "stub too big");
2075   __ end_a_stub();
2076 }
2077 
2078 
2079 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2080   assert(exceptionOop->as_register() == r0, "must match");
2081   assert(exceptionPC->as_register() == r3, "must match");
2082 
2083   // exception object is not added to oop map by LinearScan
2084   // (LinearScan assumes that no oops are in fixed registers)
2085   info->add_register_oop(exceptionOop);
2086   Runtime1::StubID unwind_id;
2087 
2088   // get current pc information
2089   // pc is only needed if the method has an exception handler, the unwind code does not need it.
2090   if (compilation()->debug_info_recorder()->last_pc_offset() == __ offset()) {
2091     // As no instructions have been generated yet for this LIR node it's
2092     // possible that an oop map already exists for the current offset.
2093     // In that case insert a dummy NOP here to ensure all oop map PCs
2094     // are unique. See JDK-8237483.
2095     __ nop();
2096   }
2097   int pc_for_athrow_offset = __ offset();
2098   InternalAddress pc_for_athrow(__ pc());
2099   __ adr(exceptionPC->as_register(), pc_for_athrow);
2100   add_call_info(pc_for_athrow_offset, info); // for exception handler
2101 
2102   __ verify_not_null_oop(r0);
2103   // search an exception handler (r0: exception oop, r3: throwing pc)
2104   if (compilation()->has_fpu_code()) {
2105     unwind_id = Runtime1::handle_exception_id;
2106   } else {
2107     unwind_id = Runtime1::handle_exception_nofpu_id;
2108   }
2109   __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
2110 
2111   // FIXME: is there enough room for a two-byte trap?
2112   __ nop();
2113 }
2114 
2115 
2116 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
2117   assert(exceptionOop->as_register() == r0, "must match");
2118 
2119   __ b(_unwind_handler_entry);
2120 }
2121 
2122 
2123 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2124   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2125   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2126 
2127   switch (left->type()) {
2128     case T_INT:
2129       switch (code) {
2130       case lir_shl:  __ lslvw (dreg, lreg, count->as_register()); break;
2131       case lir_shr:  __ asrvw (dreg, lreg, count->as_register()); break;
2132       case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
2133       default:
2134         ShouldNotReachHere();
2135         break;
2136       }
2137       break;
2138     case T_LONG:
2139     case T_ADDRESS:
2140     case T_OBJECT:
2141       switch (code) {
2142       case lir_shl:  __ lslv (dreg, lreg, count->as_register()); break;
2143       case lir_shr:  __ asrv (dreg, lreg, count->as_register()); break;
2144       case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
2145       default:
2146         ShouldNotReachHere();
2147         break;
2148       }
2149       break;
2150     default:
2151       ShouldNotReachHere();
2152       break;
2154   }
2155 }
2156 
2157 
2158 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2159   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2160   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2161 
2162   switch (left->type()) {
2163     case T_INT:
2164       switch (code) {
2165       case lir_shl:  __ lslw (dreg, lreg, count); break;
2166       case lir_shr:  __ asrw (dreg, lreg, count); break;
2167       case lir_ushr: __ lsrw (dreg, lreg, count); break;
2168       default:
2169         ShouldNotReachHere();
2170         break;
2171       }
2172       break;
2173     case T_LONG:
2174     case T_ADDRESS:
2175     case T_OBJECT:
2176       switch (code) {
2177       case lir_shl:  __ lsl (dreg, lreg, count); break;
2178       case lir_shr:  __ asr (dreg, lreg, count); break;
2179       case lir_ushr: __ lsr (dreg, lreg, count); break;
2180       default:
2181         ShouldNotReachHere();
2182         break;
2183       }
2184       break;
2185     default:
2186       ShouldNotReachHere();
2187       break;
2189   }
2190 }
2191 
2192 
2193 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2194   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2195   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2196   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2197   __ str (r, Address(sp, offset_from_rsp_in_bytes));
2198 }
2199 
2200 
2201 void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2202   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2203   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2204   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2205   __ mov (rscratch1, c);
2206   __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2207 }
2208 
2209 
2210 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
2211   ShouldNotReachHere();
2212   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2213   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2214   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2215   __ lea(rscratch1, __ constant_oop_address(o));
2216   __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2217 }
2218 
2219 
2220 // This code replaces a call to arraycopy; no exceptions may be thrown
2221 // in this code: they must be thrown in the System.arraycopy activation
2222 // frame. We could save some checks if this were not the case.
2223 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2224   ciArrayKlass* default_type = op->expected_type();
2225   Register src = op->src()->as_register();
2226   Register dst = op->dst()->as_register();
2227   Register src_pos = op->src_pos()->as_register();
2228   Register dst_pos = op->dst_pos()->as_register();
2229   Register length  = op->length()->as_register();
2230   Register tmp = op->tmp()->as_register();
2231 
2232   CodeStub* stub = op->stub();
2233   int flags = op->flags();
2234   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2235   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2236 
2237   // if we don't know anything, just go through the generic arraycopy
2238   if (default_type == NULL /* || basic_type == T_OBJECT */) {
2240     Label done;
2241     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2242 
2243     // Save the arguments in case the generic arraycopy fails and we
2244     // have to fall back to the JNI stub
2245     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2246     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2247     __ str(src,              Address(sp, 4*BytesPerWord));
2248 
2249     address copyfunc_addr = StubRoutines::generic_arraycopy();
2250     assert(copyfunc_addr != NULL, "generic arraycopy stub required");
2251 
2252     // The arguments are in the Java calling convention, so we shuffle
2253     // them into the C convention
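         // (the Java and C argument registers overlap shifted by one, so the
         // moves are ordered to read each j_rarg before it is clobbered; the
         // asserts check that the shuffle is safe)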
2254     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2255     __ mov(c_rarg0, j_rarg0);
2256     assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2257     __ mov(c_rarg1, j_rarg1);
2258     assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
2259     __ mov(c_rarg2, j_rarg2);
2260     assert_different_registers(c_rarg3, j_rarg4);
2261     __ mov(c_rarg3, j_rarg3);
2262     __ mov(c_rarg4, j_rarg4);
2263 #ifndef PRODUCT
2264     if (PrintC1Statistics) {
2265       __ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
2266     }
2267 #endif
2268     __ far_call(RuntimeAddress(copyfunc_addr));
2269 
2270     __ cbz(r0, *stub->continuation());
2271 
2272     // Reload values from the stack so they are where the stub
2273     // expects them.
2274     __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2275     __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2276     __ ldr(src,              Address(sp, 4*BytesPerWord));
2277 
2278     // r0 is -1^K where K == partial copied count
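         // (i.e. r0 == ~K, so eon with zr, a bitwise NOT, recovers K)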
2279     __ eonw(rscratch1, r0, zr);
2280     // adjust length down and src/end pos up by partial copied count
2281     __ subw(length, length, rscratch1);
2282     __ addw(src_pos, src_pos, rscratch1);
2283     __ addw(dst_pos, dst_pos, rscratch1);
2284     __ b(*stub->entry());
2285 
2286     __ bind(*stub->continuation());
2287     return;
2288   }
2289 
2290   assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2291 
2292   int elem_size = type2aelembytes(basic_type);
2293   int scale = exact_log2(elem_size);
2294 
2295   Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2296   Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2297   Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
2298   Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
2299 
2300   // test for NULL
2301   if (flags & LIR_OpArrayCopy::src_null_check) {
2302     __ cbz(src, *stub->entry());
2303   }
2304   if (flags & LIR_OpArrayCopy::dst_null_check) {
2305     __ cbz(dst, *stub->entry());
2306   }
2307 
2308   // If the compiler was not able to prove that exact type of the source or the destination
2309   // of the arraycopy is an array type, check at runtime if the source or the destination is
2310   // an instance type.
2311   if (flags & LIR_OpArrayCopy::type_check) {
2312     if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2313       __ load_klass(tmp, dst);
2314       __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2315       __ cmpw(rscratch1, Klass::_lh_neutral_value);
2316       __ br(Assembler::GE, *stub->entry());
2317     }
2318 
2319     if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2320       __ load_klass(tmp, src);
2321       __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2322       __ cmpw(rscratch1, Klass::_lh_neutral_value);
2323       __ br(Assembler::GE, *stub->entry());
2324     }
2325   }
2326 
2327   // check if negative
2328   if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
2329     __ cmpw(src_pos, 0);
2330     __ br(Assembler::LT, *stub->entry());
2331   }
2332   if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
2333     __ cmpw(dst_pos, 0);
2334     __ br(Assembler::LT, *stub->entry());
2335   }
2336 
2337   if (flags & LIR_OpArrayCopy::length_positive_check) {
2338     __ cmpw(length, 0);
2339     __ br(Assembler::LT, *stub->entry());
2340   }
2341 
2342   if (flags & LIR_OpArrayCopy::src_range_check) {
2343     __ addw(tmp, src_pos, length);
2344     __ ldrw(rscratch1, src_length_addr);
2345     __ cmpw(tmp, rscratch1);
2346     __ br(Assembler::HI, *stub->entry());
2347   }
2348   if (flags & LIR_OpArrayCopy::dst_range_check) {
2349     __ addw(tmp, dst_pos, length);
2350     __ ldrw(rscratch1, dst_length_addr);
2351     __ cmpw(tmp, rscratch1);
2352     __ br(Assembler::HI, *stub->entry());
2353   }
2354 
2355   if (flags & LIR_OpArrayCopy::type_check) {
2356     // We don't know the array types are compatible
2357     if (basic_type != T_OBJECT) {
2358       // Simple test for basic type arrays
2359       if (UseCompressedClassPointers) {
2360         __ ldrw(tmp, src_klass_addr);
2361         __ ldrw(rscratch1, dst_klass_addr);
2362         __ cmpw(tmp, rscratch1);
2363       } else {
2364         __ ldr(tmp, src_klass_addr);
2365         __ ldr(rscratch1, dst_klass_addr);
2366         __ cmp(tmp, rscratch1);
2367       }
2368       __ br(Assembler::NE, *stub->entry());
2369     } else {
2370       // For object arrays, if src is a sub class of dst then we can
2371       // safely do the copy.
2372       Label cont, slow;
2373 
2374 #define PUSH(r1, r2)                                    \
2375       stp(r1, r2, __ pre(sp, -2 * wordSize));
2376 
2377 #define POP(r1, r2)                                     \
2378       ldp(r1, r2, __ post(sp, 2 * wordSize));
2379 
2380       __ PUSH(src, dst);
2381 
2382       __ load_klass(src, src);
2383       __ load_klass(dst, dst);
2384 
2385       __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
2386 
2387       __ PUSH(src, dst);
2388       __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
2389       __ POP(src, dst);
2390 
2391       __ cbnz(src, cont);
2392 
2393       __ bind(slow);
2394       __ POP(src, dst);
2395 
2396       address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2397       if (copyfunc_addr != NULL) { // use stub if available
2398         // src is not a sub class of dst so we have to do a
2399         // per-element check.
2400 
2401         int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2402         if ((flags & mask) != mask) {
2403           // One of the two is known to be an object array; check that the other one is too.
2404           assert(flags & mask, "one of the two should be known to be an object array");
2405 
2406           if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2407             __ load_klass(tmp, src);
2408           } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2409             __ load_klass(tmp, dst);
2410           }
2411           int lh_offset = in_bytes(Klass::layout_helper_offset());
2412           Address klass_lh_addr(tmp, lh_offset);
2413           jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2414           __ ldrw(rscratch1, klass_lh_addr);
2415           __ mov(rscratch2, objArray_lh);
2416           __ eorw(rscratch1, rscratch1, rscratch2);
2417           __ cbnzw(rscratch1, *stub->entry());
2418         }
2419 
2420         // Spill because stubs can use any register they like and it's
2421         // easier to restore just those that we care about.
2422         __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2423         __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2424         __ str(src,              Address(sp, 4*BytesPerWord));
2425 
2426         __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2427         __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2428         assert_different_registers(c_rarg0, dst, dst_pos, length);
2429         __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2430         __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
2431         assert_different_registers(c_rarg1, dst, length);
2432         __ uxtw(c_rarg2, length);
2433         assert_different_registers(c_rarg2, dst);
2434 
2435         __ load_klass(c_rarg4, dst);
2436         __ ldr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
2437         __ ldrw(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
2438         __ far_call(RuntimeAddress(copyfunc_addr));
2439 
2440 #ifndef PRODUCT
2441         if (PrintC1Statistics) {
2442           Label failed;
2443           __ cbnz(r0, failed);
2444           __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
2445           __ bind(failed);
2446         }
2447 #endif
2448 
2449         __ cbz(r0, *stub->continuation());
2450 
2451 #ifndef PRODUCT
2452         if (PrintC1Statistics) {
2453           __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
2454         }
2455 #endif
2456         assert_different_registers(dst, dst_pos, length, src_pos, src, r0, rscratch1);
2457 
2458         // Restore previously spilled arguments
2459         __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2460         __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2461         __ ldr(src,              Address(sp, 4*BytesPerWord));
2462 
2463         // return value is -1^K where K is partial copied count
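             // (as above, eon with zr recovers K from ~K)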
2464         __ eonw(rscratch1, r0, zr);
2465         // adjust length down and src/end pos up by partial copied count
2466         __ subw(length, length, rscratch1);
2467         __ addw(src_pos, src_pos, rscratch1);
2468         __ addw(dst_pos, dst_pos, rscratch1);
2469       }
2470 
2471       __ b(*stub->entry());
2472 
2473       __ bind(cont);
2474       __ POP(src, dst);
2475     }
2476   }
2477 
2478 #ifdef ASSERT
2479   if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2480     // Sanity check the known type with the incoming class.  For the
2481     // primitive case the types must match exactly with src.klass and
2482     // dst.klass each exactly matching the default type.  For the
2483     // object array case, if no type check is needed then either the
2484     // dst type is exactly the expected type and the src type is a
2485     // subtype which we can't check or src is the same array as dst
2486     // but not necessarily exactly of type default_type.
2487     Label known_ok, halt;
2488     __ mov_metadata(tmp, default_type->constant_encoding());
2489     if (UseCompressedClassPointers) {
2490       __ encode_klass_not_null(tmp);
2491     }
2492 
2493     if (basic_type != T_OBJECT) {
2494 
2495       if (UseCompressedClassPointers) {
2496         __ ldrw(rscratch1, dst_klass_addr);
2497         __ cmpw(tmp, rscratch1);
2498       } else {
2499         __ ldr(rscratch1, dst_klass_addr);
2500         __ cmp(tmp, rscratch1);
2501       }
2502       __ br(Assembler::NE, halt);
2503       if (UseCompressedClassPointers) {
2504         __ ldrw(rscratch1, src_klass_addr);
2505         __ cmpw(tmp, rscratch1);
2506       } else {
2507         __ ldr(rscratch1, src_klass_addr);
2508         __ cmp(tmp, rscratch1);
2509       }
2510       __ br(Assembler::EQ, known_ok);
2511     } else {
2512       if (UseCompressedClassPointers) {
2513         __ ldrw(rscratch1, dst_klass_addr);
2514         __ cmpw(tmp, rscratch1);
2515       } else {
2516         __ ldr(rscratch1, dst_klass_addr);
2517         __ cmp(tmp, rscratch1);
2518       }
2519       __ br(Assembler::EQ, known_ok);
2520       __ cmp(src, dst);
2521       __ br(Assembler::EQ, known_ok);
2522     }
2523     __ bind(halt);
2524     __ stop("incorrect type information in arraycopy");
2525     __ bind(known_ok);
2526   }
2527 #endif
2528 
2529 #ifndef PRODUCT
2530   if (PrintC1Statistics) {
2531     __ incrementw(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
2532   }
2533 #endif
2534 
2535   __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2536   __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2537   assert_different_registers(c_rarg0, dst, dst_pos, length);
2538   __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2539   __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
2540   assert_different_registers(c_rarg1, dst, length);
2541   __ uxtw(c_rarg2, length);
2542   assert_different_registers(c_rarg2, dst);
2543 
2544   bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2545   bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
2546   const char *name;
2547   address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2548 
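       // stub entry points live in the code cache and are reached with a
       // far_call; anything else goes through a leaf VM call with the three
       // arguments set up above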
2549   CodeBlob *cb = CodeCache::find_blob(entry);
2550   if (cb) {
2551     __ far_call(RuntimeAddress(entry));
2552   } else {
2553     __ call_VM_leaf(entry, 3);
2554   }
2555 
2556   __ bind(*stub->continuation());
2557 }
2558 
2559 
2560 
2561 
2562 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2563   Register obj = op->obj_opr()->as_register();  // may not be an oop
2564   Register hdr = op->hdr_opr()->as_register();
2565   Register lock = op->lock_opr()->as_register();
2566   if (!UseFastLocking) {
2567     __ b(*op->stub()->entry());
2568   } else if (op->code() == lir_lock) {
2569     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2570     // add debug info for NullPointerException only if one is possible
2571     int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
2572     if (op->info() != NULL) {
2573       add_debug_info_for_null_check(null_check_offset, op->info());
2574     }
2575     // done
2576   } else if (op->code() == lir_unlock) {
2577     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2578     __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2579   } else {
2580     Unimplemented();
2581   }
2582   __ bind(*op->stub()->continuation());
2583 }
2584 
2585 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
2586   Register obj = op->obj()->as_pointer_register();
2587   Register result = op->result_opr()->as_pointer_register();
2588 
2589   CodeEmitInfo* info = op->info();
2590   if (info != NULL) {
2591     add_debug_info_for_null_check_here(info);
2592   }
2593 
2594   if (UseCompressedClassPointers) {
2595     __ ldrw(result, Address (obj, oopDesc::klass_offset_in_bytes()));
2596     __ decode_klass_not_null(result);
2597   } else {
2598     __ ldr(result, Address (obj, oopDesc::klass_offset_in_bytes()));
2599   }
2600 }
2601 
2602 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2603   ciMethod* method = op->profiled_method();
2604   int bci          = op->profiled_bci();
2605   ciMethod* callee = op->profiled_callee();
2606 
2607   // Update counter for all call types
2608   ciMethodData* md = method->method_data_or_null();
2609   assert(md != NULL, "Sanity");
2610   ciProfileData* data = md->bci_to_data(bci);
2611   assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
2612   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
2613   Register mdo  = op->mdo()->as_register();
2614   __ mov_metadata(mdo, md->constant_encoding());
2615   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
2616   // Perform additional virtual call profiling for invokevirtual and
2617   // invokeinterface bytecodes
2618   if (op->should_profile_receiver_type()) {
2619     assert(op->recv()->is_single_cpu(), "recv must be allocated");
2620     Register recv = op->recv()->as_register();
2621     assert_different_registers(mdo, recv);
2622     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
2623     ciKlass* known_klass = op->known_holder();
2624     if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
2625       // We know the type that will be seen at this call site; we can
2626       // statically update the MethodData* rather than needing to do
2627       // dynamic tests on the receiver type
2628 
2629       // NOTE: we should probably put a lock around this search to
2630       // avoid collisions by concurrent compilations
2631       ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
2632       uint i;
2633       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2634         ciKlass* receiver = vc_data->receiver(i);
2635         if (known_klass->equals(receiver)) {
2636           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
2637           __ addptr(data_addr, DataLayout::counter_increment);
2638           return;
2639         }
2640       }
2641 
2642       // Receiver type not found in profile data; select an empty slot
2643 
2644       // Note that this is less efficient than it should be because it
2645       // always does a write to the receiver part of the
2646       // VirtualCallData rather than just the first time
2647       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2648         ciKlass* receiver = vc_data->receiver(i);
2649         if (receiver == NULL) {
2650           Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
2651           __ mov_metadata(rscratch1, known_klass->constant_encoding());
2652           __ lea(rscratch2, recv_addr);
2653           __ str(rscratch1, Address(rscratch2));
2654           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
2655           __ addptr(data_addr, DataLayout::counter_increment);
2656           return;
2657         }
2658       }
2659     } else {
2660       __ load_klass(recv, recv);
2661       Label update_done;
2662       type_profile_helper(mdo, md, data, recv, &update_done);
2663       // Receiver did not match any saved receiver and there is no empty row for it.
2664       // Increment total counter to indicate polymorphic case.
2665       __ addptr(counter_addr, DataLayout::counter_increment);
2666 
2667       __ bind(update_done);
2668     }
2669   } else {
2670     // Static call
2671     __ addptr(counter_addr, DataLayout::counter_increment);
2672   }
2673 }
2674 
2675 
2676 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
2677   Unimplemented();
2678 }
2679 
2680 
2681 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
2682   __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
2683 }
2684 
2685 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
2686   assert(op->crc()->is_single_cpu(),  "crc must be register");
2687   assert(op->val()->is_single_cpu(),  "byte value must be register");
2688   assert(op->result_opr()->is_single_cpu(), "result must be register");
2689   Register crc = op->crc()->as_register();
2690   Register val = op->val()->as_register();
2691   Register res = op->result_opr()->as_register();
2692 
2693   assert_different_registers(val, crc, res);
2694   uint64_t offset;
2695   __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);
2696   if (offset) __ add(res, res, offset);
2697 
2698   __ mvnw(crc, crc); // ~crc
2699   __ update_byte_crc32(crc, val, res);
2700   __ mvnw(res, crc); // ~crc
2701 }
2702 
2703 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
2704   COMMENT("emit_profile_type {");
2705   Register obj = op->obj()->as_register();
2706   Register tmp = op->tmp()->as_pointer_register();
2707   Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
2708   ciKlass* exact_klass = op->exact_klass();
2709   intptr_t current_klass = op->current_klass();
2710   bool not_null = op->not_null();
2711   bool no_conflict = op->no_conflict();
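       // The profile cell at mdo_addr packs a klass pointer with flag bits
       // (TypeEntries::null_seen / type_unknown) in its low bits.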
2712 
2713   Label update, next, none;
2714 
2715   bool do_null = !not_null;
2716   bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
2717   bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
2718 
2719   assert(do_null || do_update, "why are we here?");
2720   assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
2721   assert(mdo_addr.base() != rscratch1, "wrong register");
2722 
2723   __ verify_oop(obj);
2724 
2725   if (tmp != obj) {
2726     __ mov(tmp, obj);
2727   }
2728   if (do_null) {
2729     __ cbnz(tmp, update);
2730     if (!TypeEntries::was_null_seen(current_klass)) {
2731       __ ldr(rscratch2, mdo_addr);
2732       __ orr(rscratch2, rscratch2, TypeEntries::null_seen);
2733       __ str(rscratch2, mdo_addr);
2734     }
2735     if (do_update) {
2736       __ b(next);
2737     }
2738 #ifdef ASSERT
2739   } else {
2740     __ cbnz(tmp, update);
2741     __ stop("unexpected null obj");
2742 #endif
2743   }
2747 
2748   __ bind(update);
2749 
2750   if (do_update) {
2751 #ifdef ASSERT
2752     if (exact_klass != NULL) {
2753       Label ok;
2754       __ load_klass(tmp, tmp);
2755       __ mov_metadata(rscratch1, exact_klass->constant_encoding());
2756       __ eor(rscratch1, tmp, rscratch1);
2757       __ cbz(rscratch1, ok);
2758       __ stop("exact klass and actual klass differ");
2759       __ bind(ok);
2760     }
2761 #endif
2762     if (!no_conflict) {
2763       if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
2764         if (exact_klass != NULL) {
2765           __ mov_metadata(tmp, exact_klass->constant_encoding());
2766         } else {
2767           __ load_klass(tmp, tmp);
2768         }
2769 
2770         __ ldr(rscratch2, mdo_addr);
2771         __ eor(tmp, tmp, rscratch2);
2772         __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2773         // klass seen before, nothing to do. The unknown bit may have been
2774         // set already but no need to check.
2775         __ cbz(rscratch1, next);
2776 
2777         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2778 
2779         if (TypeEntries::is_type_none(current_klass)) {
2780           __ cbz(rscratch2, none);
2781           __ cmp(rscratch2, (u1)TypeEntries::null_seen);
2782           __ br(Assembler::EQ, none);
2783           // There is a chance that the checks above (re-reading profiling
2784           // data from memory) fail if another thread has just set the
2785           // profiling to this obj's klass
2786           __ dmb(Assembler::ISHLD);
2787           __ ldr(rscratch2, mdo_addr);
2788           __ eor(tmp, tmp, rscratch2);
2789           __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2790           __ cbz(rscratch1, next);
2791         }
2792       } else {
2793         assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
2794                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
2795 
2796         __ ldr(tmp, mdo_addr);
2797         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2798       }
2799 
2800       // different than before. Cannot keep accurate profile.
2801       __ ldr(rscratch2, mdo_addr);
2802       __ orr(rscratch2, rscratch2, TypeEntries::type_unknown);
2803       __ str(rscratch2, mdo_addr);
2804 
2805       if (TypeEntries::is_type_none(current_klass)) {
2806         __ b(next);
2807 
2808         __ bind(none);
2809         // first time here. Set profile type.
2810         __ str(tmp, mdo_addr);
2811       }
2812     } else {
2813       // There's a single possible klass at this profile point
2814       assert(exact_klass != NULL, "should be");
2815       if (TypeEntries::is_type_none(current_klass)) {
2816         __ mov_metadata(tmp, exact_klass->constant_encoding());
2817         __ ldr(rscratch2, mdo_addr);
2818         __ eor(tmp, tmp, rscratch2);
2819         __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2820         __ cbz(rscratch1, next);
2821 #ifdef ASSERT
2822         {
2823           Label ok;
2824           __ ldr(rscratch1, mdo_addr);
2825           __ cbz(rscratch1, ok);
2826           __ cmp(rscratch1, (u1)TypeEntries::null_seen);
2827           __ br(Assembler::EQ, ok);
2828           // may have been set by another thread
2829           __ dmb(Assembler::ISHLD);
2830           __ mov_metadata(rscratch1, exact_klass->constant_encoding());
2831           __ ldr(rscratch2, mdo_addr);
2832           __ eor(rscratch2, rscratch1, rscratch2);
2833           __ andr(rscratch2, rscratch2, TypeEntries::type_mask);
2834           __ cbz(rscratch2, ok);
2835 
2836           __ stop("unexpected profiling mismatch");
2837           __ bind(ok);
2838         }
2839 #endif
2840         // first time here. Set profile type.
2841         __ str(tmp, mdo_addr);
2842       } else {
2843         assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
2844                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2845 
2846         __ ldr(tmp, mdo_addr);
2847         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2848 
2849         __ orr(tmp, tmp, TypeEntries::type_unknown);
2850         __ str(tmp, mdo_addr);
2851         // FIXME: Write barrier needed here?
2852       }
2853     }
2854 
2855     __ bind(next);
2856   }
2857   COMMENT("} emit_profile_type");
2858 }
2859 
2860 
2861 void LIR_Assembler::align_backward_branch_target() {
2862 }
2863 
2864 
2865 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2866   // tmp must be unused
2867   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2868 
2869   if (left->is_single_cpu()) {
2870     assert(dest->is_single_cpu(), "expect single result reg");
2871     __ negw(dest->as_register(), left->as_register());
2872   } else if (left->is_double_cpu()) {
2873     assert(dest->is_double_cpu(), "expect double result reg");
2874     __ neg(dest->as_register_lo(), left->as_register_lo());
2875   } else if (left->is_single_fpu()) {
2876     assert(dest->is_single_fpu(), "expect single float result reg");
2877     __ fnegs(dest->as_float_reg(), left->as_float_reg());
2878   } else {
2879     assert(left->is_double_fpu(), "expect double float operand reg");
2880     assert(dest->is_double_fpu(), "expect double float result reg");
2881     __ fnegd(dest->as_double_reg(), left->as_double_reg());
2882   }
2883 }
2884 
2885 
2886 void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
2887   if (patch_code != lir_patch_none) {
2888     deoptimize_trap(info);
2889     return;
2890   }
2891 
2892   __ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr()));
2893 }
2894 
2895 
void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");

  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    __ far_call(RuntimeAddress(dest));
  } else {
    __ mov(rscratch1, RuntimeAddress(dest));
    __ blr(rscratch1);
  }

  if (info != NULL) {
    add_call_info_here(info);
  }
}

void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  if (dest->is_address() || src->is_address()) {
    move_op(src, dest, type, lir_patch_none, info,
            /*pop_fpu_stack*/false, /*wide*/false);
  } else {
    ShouldNotReachHere();
  }
}

#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    Assembler::Condition acond = Assembler::AL;
    switch (op->condition()) {
      case lir_cond_equal:        acond = Assembler::EQ;  break;
      case lir_cond_notEqual:     acond = Assembler::NE;  break;
      case lir_cond_less:         acond = Assembler::LT;  break;
      case lir_cond_lessEqual:    acond = Assembler::LE;  break;
      case lir_cond_greaterEqual: acond = Assembler::GE;  break;
      case lir_cond_greater:      acond = Assembler::GT;  break;
      case lir_cond_belowEqual:   acond = Assembler::LS;  break;
      case lir_cond_aboveEqual:   acond = Assembler::HS;  break;
      default:                    ShouldNotReachHere();
    }
    __ br(acond, ok);
  }
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif
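
// Memory barriers. MacroAssembler::membar boils down to an AArch64 dmb of the
// requested strength: acquire keeps earlier loads ahead of later loads and
// stores (LoadLoad|LoadStore), release keeps earlier loads and stores ahead of
// later stores (LoadStore|StoreStore), and AnyAny is a full two-way fence.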
void LIR_Assembler::membar() {
  COMMENT("membar");
  __ membar(MacroAssembler::AnyAny);
}

void LIR_Assembler::membar_acquire() {
  __ membar(Assembler::LoadLoad|Assembler::LoadStore);
}

void LIR_Assembler::membar_release() {
  __ membar(Assembler::LoadStore|Assembler::StoreStore);
}

void LIR_Assembler::membar_loadload() {
  __ membar(Assembler::LoadLoad);
}

void LIR_Assembler::membar_storestore() {
  __ membar(MacroAssembler::StoreStore);
}

void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }

void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }

void LIR_Assembler::on_spin_wait() {
  __ spin_wait();
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  __ mov(result_reg->as_register(), rthread);
}


void LIR_Assembler::peephole(LIR_List *lir) {
#if 0
  if (tableswitch_count >= max_tableswitches)
    return;

  /*
    This finite-state automaton recognizes sequences of compare-and-
    branch instructions.  We will turn them into a tableswitch.  You
    could argue that C1 really shouldn't be doing this sort of
    optimization, but without it the code is really horrible.
  */
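
  /*
    For example, a chain like the following (shown schematically; the
    register and key values are hypothetical) collapses into a single
    tableswitch:

      cmp  r2, #10            cmp  r2, #10
      beq  L10                tableswitch {
      cmp  r2, #11     ==>      10: L10
      beq  L11                  11: L11
      cmp  r2, #12              12: L12
      beq  L12                }
  */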

  enum { start_s, cmp1_s, beq_s, cmp_s } state;
  int first_key, last_key = -2147483648;
  int next_key = 0;
  int start_insn = -1;
  int last_insn = -1;
  Register reg = noreg;
  LIR_Opr reg_opr;
  state = start_s;

  LIR_OpList* inst = lir->instructions_list();
  for (int i = 0; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);
    switch (state) {
    case start_s:
      first_key = -1;
      start_insn = i;
      switch (op->code()) {
      case lir_cmp:
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        if (opr1->is_cpu_register() && opr1->is_single_cpu()
            && opr2->is_constant()
            && opr2->type() == T_INT) {
          reg_opr = opr1;
          reg = opr1->as_register();
          first_key = opr2->as_constant_ptr()->as_jint();
          next_key = first_key + 1;
          state = cmp_s;
          goto next_state;
        }
        break;
      }
      break;
    case cmp_s:
      switch (op->code()) {
      case lir_branch:
        if (op->as_OpBranch()->cond() == lir_cond_equal) {
          state = beq_s;
          last_insn = i;
          goto next_state;
        }
      }
      state = start_s;
      break;
    case beq_s:
      switch (op->code()) {
      case lir_cmp: {
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        if (opr1->is_cpu_register() && opr1->is_single_cpu()
            && opr1->as_register() == reg
            && opr2->is_constant()
            && opr2->type() == T_INT
            && opr2->as_constant_ptr()->as_jint() == next_key) {
          last_key = next_key;
          next_key++;
          state = cmp_s;
          goto next_state;
        }
      }
      }
      last_key = next_key;
      state = start_s;
      break;
    default:
      assert(false, "impossible state");
    }
    if (state == start_s) {
      if (first_key < last_key - 5L && reg != noreg) {
        {
          // printf("found run register %d starting at insn %d low value %d high value %d\n",
          //        reg->encoding(),
          //        start_insn, first_key, last_key);
          //   for (int i = 0; i < inst->length(); i++) {
          //     inst->at(i)->print();
          //     tty->print("\n");
          //   }
          //   tty->print("\n");
        }

        struct tableswitch *sw = &switches[tableswitch_count];
        sw->_insn_index = start_insn, sw->_first_key = first_key,
          sw->_last_key = last_key, sw->_reg = reg;
        inst->insert_before(last_insn + 1, new LIR_OpLabel(&sw->_after));
        {
          // Insert the new table of branches
          int offset = last_insn;
          for (int n = first_key; n < last_key; n++) {
            inst->insert_before
              (last_insn + 1,
               new LIR_OpBranch(lir_cond_always, T_ILLEGAL,
                                inst->at(offset)->as_OpBranch()->label()));
            offset -= 2, i++;
          }
        }
        // Delete all the old compare-and-branch instructions
        for (int n = first_key; n < last_key; n++) {
          inst->remove_at(start_insn);
          inst->remove_at(start_insn);
        }
        // Insert the tableswitch instruction
        inst->insert_before(start_insn,
                            new LIR_Op2(lir_cmp, lir_cond_always,
                                        LIR_OprFact::intConst(tableswitch_count),
                                        reg_opr));
        inst->insert_before(start_insn + 1, new LIR_OpLabel(&sw->_branches));
        tableswitch_count++;
      }
      reg = noreg;
      last_key = -2147483648;
    }
  next_state:
    ;
  }
#endif
}

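// Atomic read-modify-write (lir_xadd, lir_xchg). The MacroAssembler helpers
// are chosen by operand width: the 'w' (32-bit) forms for T_INT and for
// narrow oops when UseCompressedOops, the 64-bit forms otherwise. The helpers
// have acquire/release semantics; the trailing AnyAny membar conservatively
// upgrades the operation to a full fence.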
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
  Address addr = as_Address(src->as_address_ptr());
  BasicType type = src->type();
  bool is_oop = is_reference_type(type);

  void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
  void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);

  switch(type) {
  case T_INT:
    xchg = &MacroAssembler::atomic_xchgalw;
    add = &MacroAssembler::atomic_addalw;
    break;
  case T_LONG:
    xchg = &MacroAssembler::atomic_xchgal;
    add = &MacroAssembler::atomic_addal;
    break;
  case T_OBJECT:
  case T_ARRAY:
    if (UseCompressedOops) {
      // Narrow oops are 32 bits wide, so the word-sized forms apply.
      xchg = &MacroAssembler::atomic_xchgalw;
      add = &MacroAssembler::atomic_addalw;
    } else {
      xchg = &MacroAssembler::atomic_xchgal;
      add = &MacroAssembler::atomic_addal;
    }
    break;
  default:
    ShouldNotReachHere();
    xchg = &MacroAssembler::atomic_xchgal;
    add = &MacroAssembler::atomic_addal; // unreachable
  }

  switch (code) {
  case lir_xadd:
    {
      RegisterOrConstant inc;
      Register tmp = as_reg(tmp_op);
      Register dst = as_reg(dest);
      if (data->is_constant()) {
        inc = RegisterOrConstant(as_long(data));
        assert_different_registers(dst, addr.base(), tmp,
                                   rscratch1, rscratch2);
      } else {
        inc = RegisterOrConstant(as_reg(data));
        assert_different_registers(inc.as_register(), dst, addr.base(), tmp,
                                   rscratch1, rscratch2);
      }
      __ lea(tmp, addr);
      (_masm->*add)(dst, inc, tmp);
      break;
    }
  case lir_xchg:
    {
      Register tmp = tmp_op->as_register();
      Register obj = as_reg(data);
      Register dst = as_reg(dest);
      if (is_oop && UseCompressedOops) {
        __ encode_heap_oop(rscratch2, obj);
        obj = rscratch2;
      }
      assert_different_registers(obj, addr.base(), tmp, rscratch1, dst);
      __ lea(tmp, addr);
      (_masm->*xchg)(dst, obj, tmp);
      if (is_oop && UseCompressedOops) {
        __ decode_heap_oop(dst);
      }
    }
    break;
  default:
    ShouldNotReachHere();
  }
  __ membar(__ AnyAny);
}

#undef __