/*
 * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "code/compiledIC.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_aarch64.inline.hpp"


#ifndef PRODUCT
#define COMMENT(x)   do { __ block_comment(x); } while (0)
#else
#define COMMENT(x)
#endif
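// Illustrative example: COMMENT("verified entry point") expands to
// __ block_comment("verified entry point") in non-PRODUCT builds, annotating
// the disassembly, and compiles away to nothing in PRODUCT builds.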

NEEDS_CLEANUP // remove these definitions?
const Register IC_Klass    = rscratch2;   // where the IC klass is cached
const Register SYNC_header = r0;          // synchronization header
const Register SHIFT_count = r0;          // where the count for shift operations must be

#define __ _masm->


static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2);
}



static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2,
                                       Register &tmp3) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp2 = extra;
  } else if (tmp3 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp3 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2, tmp3);
}
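// Illustrative example (register assignments hypothetical): with
// preserve == r0, extra == r8, tmp1 == r0 and tmp2 == r2, the first overload
// above rewrites tmp1 to r8, leaving {r0, r8, r2} pairwise distinct. At most
// one temp can alias 'preserve', so a single 'extra' register always
// suffices.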


bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}

//--------------fpu register translations-----------------------


address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}


address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

address LIR_Assembler::int_constant(jlong n) {
  address const_addr = __ long_constant(n);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

void LIR_Assembler::breakpoint() { Unimplemented(); }

void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }

void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }

bool LIR_Assembler::is_literal_address(LIR_Address* addr) { Unimplemented(); return false; }
//-------------------------------------------

static Register as_reg(LIR_Opr op) {
  return op->is_double_cpu() ? op->as_register_lo() : op->as_register();
}

static jlong as_long(LIR_Opr data) {
  jlong result;
  switch (data->type()) {
  case T_INT:
    result = (data->as_jint());
    break;
  case T_LONG:
    result = (data->as_jlong());
    break;
  default:
    ShouldNotReachHere();
    result = 0;  // unreachable
  }
  return result;
}

Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  Register base = addr->base()->as_pointer_register();
  LIR_Opr opr = addr->index();
  if (opr->is_cpu_register()) {
    Register index;
    if (opr->is_single_cpu())
      index = opr->as_register();
    else
      index = opr->as_register_lo();
    assert(addr->disp() == 0, "must be");
    switch(opr->type()) {
      case T_INT:
        return Address(base, index, Address::sxtw(addr->scale()));
      case T_LONG:
        return Address(base, index, Address::lsl(addr->scale()));
      default:
        ShouldNotReachHere();
    }
  } else {
    assert(addr->scale() == 0,
           "expected for immediate operand, was: %d", addr->scale());
    ptrdiff_t offset = ptrdiff_t(addr->disp());
    // NOTE: Does not handle any 16 byte vector access.
    const uint type_size = type2aelembytes(addr->type(), true);
    return __ legitimize_address(Address(base, offset), type_size, tmp);
  }
  return Address();
}
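// Illustrative sketch of the immediate-offset path above: a displacement is
// directly encodable only in the scaled unsigned 12-bit or unscaled signed
// 9-bit load/store forms. For anything else, legitimize_address materializes
// the address in 'tmp', roughly:
//   mov  tmp, #disp
//   add  tmp, base, tmp      // and returns Address(tmp, 0)
// The exact sequence is chosen by the macro assembler.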

Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  ShouldNotReachHere();
  return Address();
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}

Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr, rscratch1);  // Ouch
  // FIXME: This needs to be much more clever.  See x86.
}

// Ensure a valid Address (base + offset) to a stack-slot. If stack access is
// not encodable as a base + (immediate) offset, generate an explicit address
// calculation to hold the address in a temporary register.
Address LIR_Assembler::stack_slot_address(int index, uint size, Register tmp, int adjust) {
  precond(size == 4 || size == 8);
  Address addr = frame_map()->address_for_slot(index, adjust);
  precond(addr.getMode() == Address::base_plus_offset);
  precond(addr.base() == sp);
  precond(addr.offset() > 0);
  uint mask = size - 1;
  assert((addr.offset() & mask) == 0, "scaled offsets only");
  return __ legitimize_address(addr, size, tmp);
}

void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  // r2: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame, so the first slot
  // in the local array is the last local from the interpreter
  // and the last slot is local[0] (the receiver) from the interpreter.
  //
  // Similarly with locks: the first lock slot in the osr buffer is the nth
  // lock from the interpreter frame, and the nth lock slot in the osr buffer
  // is the 0th lock in the interpreter frame (the method lock, if a
  // synchronized method).
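  //
  // Sketch of the resulting buffer (n = max_locals, m = number_of_locks;
  // each monitor entry is a 2-word lock/oop pair):
  //
  //   OSR_buf + 0                           local[n-1]  (last interpreter local)
  //   ...
  //   OSR_buf + (n-1)*BytesPerWord          local[0]    (receiver)
  //   OSR_buf + n*BytesPerWord              lock/oop pair for interpreter lock m-1
  //   ...
  //   OSR_buf + (n + 2*(m-1))*BytesPerWord  lock/oop pair for interpreter lock 0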

  // Initialize monitors in the compiled activation.
  //   r2: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord));
        __ cbnz(rscratch1, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif
      __ ldp(r19, r20, Address(OSR_buf, slot_offset));
      __ str(r19, frame_map()->address_for_monitor_lock(i));
      __ str(r20, frame_map()->address_for_monitor_object(i));
    }
  }
}


// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
  Register receiver = FrameMap::receiver_opr->as_register();
  Register ic_klass = IC_Klass;
  int start_offset = __ offset();
  __ inline_cache_check(receiver, ic_klass);

  // if icache check fails, then jump to runtime routine
  // Note: RECEIVER must still contain the receiver!
  Label dont;
  __ br(Assembler::EQ, dont);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  // We align the verified entry point unless the method body
  // (including its inline cache check) will fit in a single 64-byte
  // icache line.
  if (! method()->is_accessor() || __ offset() - start_offset > 4 * 4) {
    // force alignment after the cache check.
    __ align(CodeEntryAlignment);
  }

  __ bind(dont);
  return start_offset;
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  Label L_skip_barrier;

  __ mov_metadata(rscratch2, method->holder()->constant_encoding());
  __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier /*L_fast_path*/);
  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
  __ bind(L_skip_barrier);
}

void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ mov(reg, zr);
  } else {
    __ movoop(reg, o, /*immediate*/true);
  }
}

void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;

  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
    reloc_type = relocInfo::section_word_type;
    break;
  case PatchingStub::load_klass_id:
    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
    reloc_type = relocInfo::metadata_type;
    break;
  case PatchingStub::load_mirror_id:
    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  case PatchingStub::load_appendix_id:
    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  default: ShouldNotReachHere();
  }

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  deoptimize_trap(info);
}


// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!

  return in_bytes(frame_map()->framesize_in_bytes());
}


int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in r0 and r3
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(r0);

  // search an exception handler (r0: exception oop, r3: throwing pc)
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(r0);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(r19, r0);  // Preserve the exception
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::r0_opr);
    stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
    __ unlock_object(r5, r4, r0, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ mov(c_rarg0, rthread);
    __ mov_metadata(c_rarg1, method()->constant_encoding());
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(r0, r19);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ block_comment("remove_frame and dispatch to the unwind handler");
  __ remove_frame(initial_frame_size_in_bytes());
  __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for the deopt handler
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

  __ adr(lr, pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
  _masm->code_section()->relocate(adr, relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}

void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  code_stub->set_safepoint_offset(__ offset());
  __ relocate(relocInfo::poll_return_type);
  __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
  __ ret(lr);
}

int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  guarantee(info != NULL, "Shouldn't be NULL");
  __ get_polling_page(rscratch1, relocInfo::poll_type);
  add_debug_info_for_branch(info);  // This isn't just debug info:
                                    // it's the oop map
  __ read_polling_page(rscratch1, relocInfo::poll_type);
  return __ offset();
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg == r31_sp)
    from_reg = sp;
  if (to_reg == r31_sp)
    to_reg = sp;
  __ mov(to_reg, from_reg);
}

void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movw(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
      break;
    }

    case T_OBJECT: {
      if (patch_code == lir_patch_none) {
        jobject2reg(c->as_jobject(), dest->as_register());
      } else {
        jobject2reg_with_patching(dest->as_register(), info);
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
        __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
      } else {
        __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
        __ ldrs(dest->as_float_reg(), Address(rscratch1));
      }
      break;
    }

    case T_DOUBLE: {
      if (__ operand_valid_for_float_immediate(c->as_jdouble())) {
        __ fmovd(dest->as_double_reg(), (c->as_jdouble()));
      } else {
        __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
        __ ldrd(dest->as_double_reg(), Address(rscratch1));
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
  case T_OBJECT:
    {
      if (! c->as_jobject())
        __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
      else {
        const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
        reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
      }
    }
    break;
  case T_ADDRESS:
    {
      const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
      reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
    }
    break;  // without this break, T_ADDRESS would fall through and re-store truncated jint bits
  case T_INT:
  case T_FLOAT:
    {
      Register reg = zr;
      if (c->as_jint_bits() == 0)
        __ strw(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
      else {
        __ movw(rscratch1, c->as_jint_bits());
        __ strw(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix()));
      }
    }
    break;
  case T_LONG:
  case T_DOUBLE:
    {
      Register reg = zr;
      if (c->as_jlong_bits() == 0)
        __ str(zr, frame_map()->address_for_slot(dest->double_stack_ix(),
                                                 lo_word_offset_in_bytes));
      else {
        __ mov(rscratch1, (intptr_t)c->as_jlong_bits());
        __ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(),
                                                        lo_word_offset_in_bytes));
      }
    }
    break;
  default:
    ShouldNotReachHere();
  }
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* to_addr = dest->as_address_ptr();

  void (Assembler::* insn)(Register Rt, const Address &adr);

  switch (type) {
  case T_ADDRESS:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_LONG:
    assert(c->as_jlong() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_INT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strw;
    break;
  case T_OBJECT:
  case T_ARRAY:
    assert(c->as_jobject() == 0, "should be");
    if (UseCompressedOops && !wide) {
      insn = &Assembler::strw;
    } else {
      insn = &Assembler::str;
    }
    break;
  case T_CHAR:
  case T_SHORT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strh;
    break;
  case T_BOOLEAN:
  case T_BYTE:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strb;
    break;
  default:
    ShouldNotReachHere();
    insn = &Assembler::str;  // unreachable
  }

  if (info) add_debug_info_for_null_check_here(info);
  (_masm->*insn)(zr, as_Address(to_addr, rscratch1));
}
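// For example, storing a null oop to a heap field with compressed oops
// enabled (and !wide) dispatches through 'insn' as strw zr, [<address>]:
// every constant store emitted here writes the zero register, with the
// access width selected by the field type.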

void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
    if (is_reference_type(src->type())) {
      // Surprising to me but we can see move of a long to t_object
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);

  } else if (dest->is_single_fpu()) {
    __ fmovs(dest->as_float_reg(), src->as_float_reg());

  } else if (dest->is_double_fpu()) {
    __ fmovd(dest->as_double_reg(), src->as_double_reg());

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  precond(src->is_register() && dest->is_stack());

  uint const c_sz32 = sizeof(uint32_t);
  uint const c_sz64 = sizeof(uint64_t);

  if (src->is_single_cpu()) {
    int index = dest->single_stack_ix();
    if (is_reference_type(type)) {
      __ str(src->as_register(), stack_slot_address(index, c_sz64, rscratch1));
      __ verify_oop(src->as_register());
    } else if (type == T_METADATA || type == T_DOUBLE || type == T_ADDRESS) {
      __ str(src->as_register(), stack_slot_address(index, c_sz64, rscratch1));
    } else {
      __ strw(src->as_register(), stack_slot_address(index, c_sz32, rscratch1));
    }

  } else if (src->is_double_cpu()) {
    int index = dest->double_stack_ix();
    Address dest_addr_LO = stack_slot_address(index, c_sz64, rscratch1, lo_word_offset_in_bytes);
    __ str(src->as_register_lo(), dest_addr_LO);

  } else if (src->is_single_fpu()) {
    int index = dest->single_stack_ix();
    __ strs(src->as_float_reg(), stack_slot_address(index, c_sz32, rscratch1));

  } else if (src->is_double_fpu()) {
    int index = dest->double_stack_ix();
    __ strd(src->as_double_reg(), stack_slot_address(index, c_sz64, rscratch1));

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = NULL;
  Register compressed_src = rscratch1;

  if (patch_code != lir_patch_none) {
    deoptimize_trap(info);
    return;
  }

  if (is_reference_type(type)) {
    __ verify_oop(src->as_register());

    if (UseCompressedOops && !wide) {
      __ encode_heap_oop(compressed_src, src->as_register());
    } else {
      compressed_src = src->as_register();
    }
  }

  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      __ strs(src->as_float_reg(), as_Address(to_addr));
      break;
    }

    case T_DOUBLE: {
      __ strd(src->as_double_reg(), as_Address(to_addr));
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ strw(compressed_src, as_Address(to_addr, rscratch2));
      } else {
        __ str(compressed_src, as_Address(to_addr));
      }
      break;
    case T_METADATA:
      // We get here to store a method pointer to the stack to pass to
      // a dtrace runtime call. This can't work on 64 bit with
      // compressed klass ptrs: T_METADATA can be a compressed klass
      // ptr or a 64 bit method pointer.
      ShouldNotReachHere();
      __ str(src->as_register(), as_Address(to_addr));
      break;
    case T_ADDRESS:
      __ str(src->as_register(), as_Address(to_addr));
      break;
    case T_INT:
      __ strw(src->as_register(), as_Address(to_addr));
      break;

    case T_LONG: {
      __ str(src->as_register_lo(), as_Address_lo(to_addr));
      break;
    }

    case T_BYTE:    // fall through
    case T_BOOLEAN: {
      __ strb(src->as_register(), as_Address(to_addr));
      break;
    }

    case T_CHAR:    // fall through
    case T_SHORT:
      __ strh(src->as_register(), as_Address(to_addr));
      break;

    default:
      ShouldNotReachHere();
  }
  if (info != NULL) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  precond(src->is_stack() && dest->is_register());

  uint const c_sz32 = sizeof(uint32_t);
  uint const c_sz64 = sizeof(uint64_t);

  if (dest->is_single_cpu()) {
    int index = src->single_stack_ix();
    if (is_reference_type(type)) {
      __ ldr(dest->as_register(), stack_slot_address(index, c_sz64, rscratch1));
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ ldr(dest->as_register(), stack_slot_address(index, c_sz64, rscratch1));
    } else {
      __ ldrw(dest->as_register(), stack_slot_address(index, c_sz32, rscratch1));
    }

  } else if (dest->is_double_cpu()) {
    int index = src->double_stack_ix();
    Address src_addr_LO = stack_slot_address(index, c_sz64, rscratch1, lo_word_offset_in_bytes);
    __ ldr(dest->as_register_lo(), src_addr_LO);

  } else if (dest->is_single_fpu()) {
    int index = src->single_stack_ix();
    __ ldrs(dest->as_float_reg(), stack_slot_address(index, c_sz32, rscratch1));

  } else if (dest->is_double_fpu()) {
    int index = src->double_stack_ix();
    __ ldrd(dest->as_double_reg(), stack_slot_address(index, c_sz64, rscratch1));

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;

  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
    reloc_type = relocInfo::section_word_type;
    break;
  case PatchingStub::load_klass_id:
    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
    reloc_type = relocInfo::metadata_type;
    break;
  case PatchingStub::load_mirror_id:
    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  case PatchingStub::load_appendix_id:
    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  default: ShouldNotReachHere();
  }

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}

void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {

  LIR_Opr temp;
  if (type == T_LONG || type == T_DOUBLE)
    temp = FrameMap::rscratch1_long_opr;
  else
    temp = FrameMap::rscratch1_opr;

  stack2reg(src, temp, src->type());
  reg2stack(temp, dest, dest->type(), false);
}


void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
  LIR_Address* addr = src->as_address_ptr();
  LIR_Address* from_addr = src->as_address_ptr();

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  if (patch_code != lir_patch_none) {
    deoptimize_trap(info);
    return;
  }

  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }
  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      __ ldrs(dest->as_float_reg(), as_Address(from_addr));
      break;
    }

    case T_DOUBLE: {
      __ ldrd(dest->as_double_reg(), as_Address(from_addr));
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ ldrw(dest->as_register(), as_Address(from_addr));
      } else {
        __ ldr(dest->as_register(), as_Address(from_addr));
      }
      break;
    case T_METADATA:
      // We get here to load a method pointer from the stack to pass to
      // a dtrace runtime call. This can't work on 64 bit with
      // compressed klass ptrs: T_METADATA can be a compressed klass
      // ptr or a 64 bit method pointer.
      ShouldNotReachHere();
      __ ldr(dest->as_register(), as_Address(from_addr));
      break;
    case T_ADDRESS:
      __ ldr(dest->as_register(), as_Address(from_addr));
      break;
    case T_INT:
      __ ldrw(dest->as_register(), as_Address(from_addr));
      break;

    case T_LONG: {
      __ ldr(dest->as_register_lo(), as_Address_lo(from_addr));
      break;
    }

    case T_BYTE:
      __ ldrsb(dest->as_register(), as_Address(from_addr));
      break;
    case T_BOOLEAN: {
      __ ldrb(dest->as_register(), as_Address(from_addr));
      break;
    }

    case T_CHAR:
      __ ldrh(dest->as_register(), as_Address(from_addr));
      break;
    case T_SHORT:
      __ ldrsh(dest->as_register(), as_Address(from_addr));
      break;

    default:
      ShouldNotReachHere();
  }

  if (is_reference_type(type)) {
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }

    if (!UseZGC) {
      // Load barrier has not yet been applied, so ZGC can't verify the oop here
      __ verify_oop(dest->as_register());
    }
  }
}


int LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  return exact_log2(elem_size);
}
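// E.g. for T_INT, type2aelembytes(T_INT) == 4, so the element shift returned
// is exact_log2(4) == 2, matching the scale used when forming array addresses.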


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
  case lir_idiv:
  case lir_irem:
    arithmetic_idiv(op->code(),
                    op->in_opr1(),
                    op->in_opr2(),
                    op->in_opr3(),
                    op->result_opr(),
                    op->info());
    break;
  case lir_fmad:
    __ fmaddd(op->result_opr()->as_double_reg(),
              op->in_opr1()->as_double_reg(),
              op->in_opr2()->as_double_reg(),
              op->in_opr3()->as_double_reg());
    break;
  case lir_fmaf:
    __ fmadds(op->result_opr()->as_float_reg(),
              op->in_opr1()->as_float_reg(),
              op->in_opr2()->as_float_reg(),
              op->in_opr3()->as_float_reg());
    break;
  default:      ShouldNotReachHere(); break;
  }
}

void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != NULL) add_debug_info_for_branch(op->info());
    __ b(*(op->label()));
  } else {
    Assembler::Condition acond;
    if (op->code() == lir_cond_float_branch) {
      bool is_unordered = (op->ublock() == op->block());
      // Assembler::EQ does not permit unordered branches, so we add
      // another branch here.  Likewise, Assembler::NE does not permit
      // ordered branches.  (On AArch64 an unordered fcmp result sets the
      // V flag, so the VS branch below catches the NaN cases.)
      if ((is_unordered && op->cond() == lir_cond_equal)
          || (!is_unordered && op->cond() == lir_cond_notEqual))
        __ br(Assembler::VS, *(op->ublock()->label()));
      switch(op->cond()) {
      case lir_cond_equal:        acond = Assembler::EQ; break;
      case lir_cond_notEqual:     acond = Assembler::NE; break;
      case lir_cond_less:         acond = (is_unordered ? Assembler::LT : Assembler::LO); break;
      case lir_cond_lessEqual:    acond = (is_unordered ? Assembler::LE : Assembler::LS); break;
      case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break;
      case lir_cond_greater:      acond = (is_unordered ? Assembler::HI : Assembler::GT); break;
      default:                    ShouldNotReachHere();
        acond = Assembler::EQ;  // unreachable
      }
    } else {
      switch (op->cond()) {
        case lir_cond_equal:        acond = Assembler::EQ; break;
        case lir_cond_notEqual:     acond = Assembler::NE; break;
        case lir_cond_less:         acond = Assembler::LT; break;
        case lir_cond_lessEqual:    acond = Assembler::LE; break;
        case lir_cond_greaterEqual: acond = Assembler::GE; break;
        case lir_cond_greater:      acond = Assembler::GT; break;
        case lir_cond_belowEqual:   acond = Assembler::LS; break;
        case lir_cond_aboveEqual:   acond = Assembler::HS; break;
        default:                    ShouldNotReachHere();
          acond = Assembler::EQ;  // unreachable
      }
    }
    __ br(acond, *(op->label()));
  }
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2f:
      {
        __ scvtfws(dest->as_float_reg(), src->as_register());
        break;
      }
    case Bytecodes::_i2d:
      {
        __ scvtfwd(dest->as_double_reg(), src->as_register());
        break;
      }
    case Bytecodes::_l2d:
      {
        __ scvtfd(dest->as_double_reg(), src->as_register_lo());
        break;
      }
    case Bytecodes::_l2f:
      {
        __ scvtfs(dest->as_float_reg(), src->as_register_lo());
        break;
      }
    case Bytecodes::_f2d:
      {
        __ fcvts(dest->as_double_reg(), src->as_float_reg());
        break;
      }
    case Bytecodes::_d2f:
      {
        __ fcvtd(dest->as_float_reg(), src->as_double_reg());
        break;
      }
    case Bytecodes::_i2c:
      {
        __ ubfx(dest->as_register(), src->as_register(), 0, 16);
        break;
      }
    case Bytecodes::_i2l:
      {
        __ sxtw(dest->as_register_lo(), src->as_register());
        break;
      }
    case Bytecodes::_i2s:
      {
        __ sxth(dest->as_register(), src->as_register());
        break;
      }
    case Bytecodes::_i2b:
      {
        __ sxtb(dest->as_register(), src->as_register());
        break;
      }
    case Bytecodes::_l2i:
      {
        _masm->block_comment("FIXME: This could be a no-op");
        __ uxtw(dest->as_register(), src->as_register_lo());
        break;
      }
    case Bytecodes::_d2l:
      {
        __ fcvtzd(dest->as_register_lo(), src->as_double_reg());
        break;
      }
    case Bytecodes::_f2i:
      {
        __ fcvtzsw(dest->as_register(), src->as_float_reg());
        break;
      }
    case Bytecodes::_f2l:
      {
        __ fcvtzs(dest->as_register_lo(), src->as_float_reg());
        break;
      }
    case Bytecodes::_d2i:
      {
        __ fcvtzdw(dest->as_register(), src->as_double_reg());
        break;
      }
    default: ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    __ ldrb(rscratch1, Address(op->klass()->as_register(),
                               InstanceKlass::init_state_offset()));
    __ cmpw(rscratch1, InstanceKlass::fully_initialized);
    add_debug_info_for_null_check_here(op->stub()->info());
    __ br(Assembler::NE, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();
  __ uxtw(len, len);

  if (UseSlowPath ||
      (!UseFastNewObjectArray && is_reference_type(op->type())) ||
      (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
    __ b(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::base_offset_in_bytes(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ ldr(rscratch1, Address(rscratch2));
    __ cmp(recv, rscratch1);
    __ br(Assembler::NE, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ b(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    __ lea(rscratch2,
           Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    Address recv_addr(rscratch2);
    __ ldr(rscratch1, recv_addr);
    __ cbnz(rscratch1, next_test);
    __ str(recv, recv_addr);
    __ mov(rscratch1, DataLayout::counter_increment);
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
    __ str(rscratch1, Address(rscratch2));
    __ b(*update_done);
    __ bind(next_test);
  }
}
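// A ReceiverTypeData row is a (receiver klass, count) pair. The two loops
// above implement "increment the matching row, else claim the first empty
// row"; if every row is already taken by another klass, control falls
// through without branching to *update_done and the sample is dropped.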
1281 
1282 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
1283   // we always need a stub for the failure case.
1284   CodeStub* stub = op->stub();
1285   Register obj = op->object()->as_register();
1286   Register k_RInfo = op->tmp1()->as_register();
1287   Register klass_RInfo = op->tmp2()->as_register();
1288   Register dst = op->result_opr()->as_register();
1289   ciKlass* k = op->klass();
1290   Register Rtmp1 = noreg;
1291 
1292   // check if it needs to be profiled
1293   ciMethodData* md;
1294   ciProfileData* data;
1295 
1296   const bool should_profile = op->should_profile();
1297 
1298   if (should_profile) {
1299     ciMethod* method = op->profiled_method();
1300     assert(method != NULL, "Should have method");
1301     int bci = op->profiled_bci();
1302     md = method->method_data_or_null();
1303     assert(md != NULL, "Sanity");
1304     data = md->bci_to_data(bci);
1305     assert(data != NULL,                "need data for type check");
1306     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1307   }
1308   Label profile_cast_success, profile_cast_failure;
1309   Label *success_target = should_profile ? &profile_cast_success : success;
1310   Label *failure_target = should_profile ? &profile_cast_failure : failure;
1311 
1312   if (obj == k_RInfo) {
1313     k_RInfo = dst;
1314   } else if (obj == klass_RInfo) {
1315     klass_RInfo = dst;
1316   }
1317   if (k->is_loaded() && !UseCompressedClassPointers) {
1318     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1319   } else {
1320     Rtmp1 = op->tmp3()->as_register();
1321     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1322   }
1323 
1324   assert_different_registers(obj, k_RInfo, klass_RInfo);
1325 
1326     if (should_profile) {
1327       Label not_null;
1328       __ cbnz(obj, not_null);
1329       // Object is null; update MDO and exit
1330       Register mdo  = klass_RInfo;
1331       __ mov_metadata(mdo, md->constant_encoding());
1332       Address data_addr
1333         = __ form_address(rscratch2, mdo,
1334                           md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1335                           0);
1336       __ ldrb(rscratch1, data_addr);
1337       __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1338       __ strb(rscratch1, data_addr);
1339       __ b(*obj_is_null);
1340       __ bind(not_null);
1341     } else {
1342       __ cbz(obj, *obj_is_null);
1343     }
1344 
1345   if (!k->is_loaded()) {
1346     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1347   } else {
1348     __ mov_metadata(k_RInfo, k->constant_encoding());
1349   }
1350   __ verify_oop(obj);
1351 
1352   if (op->fast_check()) {
1353     // get object class
1354     // not a safepoint as obj null check happens earlier
1355     __ load_klass(rscratch1, obj);
1356     __ cmp( rscratch1, k_RInfo);
1357 
1358     __ br(Assembler::NE, *failure_target);
1359     // successful cast, fall through to profile or jump
1360   } else {
1361     // get object class
1362     // not a safepoint as obj null check happens earlier
1363     __ load_klass(klass_RInfo, obj);
1364     if (k->is_loaded()) {
1365       // See if we get an immediate positive hit
1366       __ ldr(rscratch1, Address(klass_RInfo, int64_t(k->super_check_offset())));
1367       __ cmp(k_RInfo, rscratch1);
1368       if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1369         __ br(Assembler::NE, *failure_target);
1370         // successful cast, fall through to profile or jump
1371       } else {
1372         // See if we get an immediate positive hit
1373         __ br(Assembler::EQ, *success_target);
1374         // check for self
1375         __ cmp(klass_RInfo, k_RInfo);
1376         __ br(Assembler::EQ, *success_target);
1377 
1378         __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1379         __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1380         __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1381         // result is a boolean
1382         __ cbzw(klass_RInfo, *failure_target);
1383         // successful cast, fall through to profile or jump
1384       }
1385     } else {
1386       // perform the fast part of the checking logic
1387       __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1388       // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1389       __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1390       __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1391       __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1392       // result is a boolean
1393       __ cbz(k_RInfo, *failure_target);
1394       // successful cast, fall through to profile or jump
1395     }
1396   }
1397   if (should_profile) {
1398     Register mdo  = klass_RInfo, recv = k_RInfo;
1399     __ bind(profile_cast_success);
1400     __ mov_metadata(mdo, md->constant_encoding());
1401     __ load_klass(recv, obj);
1402     Label update_done;
1403     type_profile_helper(mdo, md, data, recv, success);
1404     __ b(*success);
1405 
1406     __ bind(profile_cast_failure);
1407     __ mov_metadata(mdo, md->constant_encoding());
1408     Address counter_addr
1409       = __ form_address(rscratch2, mdo,
1410                         md->byte_offset_of_slot(data, CounterData::count_offset()),
1411                         0);
1412     __ ldr(rscratch1, counter_addr);
1413     __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
1414     __ str(rscratch1, counter_addr);
1415     __ b(*failure);
1416   }
1417   __ b(*success);
1418 }
1419 
1420 
1421 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
1422   const bool should_profile = op->should_profile();
1423 
1424   LIR_Code code = op->code();
1425   if (code == lir_store_check) {
1426     Register value = op->object()->as_register();
1427     Register array = op->array()->as_register();
1428     Register k_RInfo = op->tmp1()->as_register();
1429     Register klass_RInfo = op->tmp2()->as_register();
1430     Register Rtmp1 = op->tmp3()->as_register();
1431 
1432     CodeStub* stub = op->stub();
1433 
1434     // check if it needs to be profiled
1435     ciMethodData* md;
1436     ciProfileData* data;
1437 
1438     if (should_profile) {
1439       ciMethod* method = op->profiled_method();
1440       assert(method != NULL, "Should have method");
1441       int bci = op->profiled_bci();
1442       md = method->method_data_or_null();
1443       assert(md != NULL, "Sanity");
1444       data = md->bci_to_data(bci);
1445       assert(data != NULL,                "need data for type check");
1446       assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1447     }
1448     Label profile_cast_success, profile_cast_failure, done;
1449     Label *success_target = should_profile ? &profile_cast_success : &done;
1450     Label *failure_target = should_profile ? &profile_cast_failure : stub->entry();
1451 
1452     if (should_profile) {
1453       Label not_null;
1454       __ cbnz(value, not_null);
1455       // Object is null; update MDO and exit
1456       Register mdo  = klass_RInfo;
1457       __ mov_metadata(mdo, md->constant_encoding());
1458       Address data_addr
1459         = __ form_address(rscratch2, mdo,
1460                           md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1461                           0);
1462       __ ldrb(rscratch1, data_addr);
1463       __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1464       __ strb(rscratch1, data_addr);
1465       __ b(done);
1466       __ bind(not_null);
1467     } else {
1468       __ cbz(value, done);
1469     }
1470 
1471     add_debug_info_for_null_check_here(op->info_for_exception());
1472     __ load_klass(k_RInfo, array);
1473     __ load_klass(klass_RInfo, value);
1474 
1475     // get instance klass (it's already uncompressed)
1476     __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
1477     // perform the fast part of the checking logic
1478     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1479     // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1480     __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1481     __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1482     __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1483     // result is a boolean
1484     __ cbzw(k_RInfo, *failure_target);
1485     // fall through to the success case
1486 
1487     if (should_profile) {
1488       Register mdo  = klass_RInfo, recv = k_RInfo;
1489       __ bind(profile_cast_success);
1490       __ mov_metadata(mdo, md->constant_encoding());
1491       __ load_klass(recv, value);
1492       Label update_done;
1493       type_profile_helper(mdo, md, data, recv, &done);
1494       __ b(done);
1495 
1496       __ bind(profile_cast_failure);
1497       __ mov_metadata(mdo, md->constant_encoding());
1498       Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1499       __ lea(rscratch2, counter_addr);
1500       __ ldr(rscratch1, Address(rscratch2));
1501       __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
1502       __ str(rscratch1, Address(rscratch2));
1503       __ b(*stub->entry());
1504     }
1505 
1506     __ bind(done);
1507   } else if (code == lir_checkcast) {
1508     Register obj = op->object()->as_register();
1509     Register dst = op->result_opr()->as_register();
1510     Label success;
1511     emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
1512     __ bind(success);
1513     if (dst != obj) {
1514       __ mov(dst, obj);
1515     }
1516   } else if (code == lir_instanceof) {
1517     Register obj = op->object()->as_register();
1518     Register dst = op->result_opr()->as_register();
1519     Label success, failure, done;
1520     emit_typecheck_helper(op, &success, &failure, &failure);
1521     __ bind(failure);
1522     __ mov(dst, zr);
1523     __ b(done);
1524     __ bind(success);
1525     __ mov(dst, 1);
1526     __ bind(done);
1527   } else {
1528     ShouldNotReachHere();
1529   }
1530 }
1531 
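     // Helpers for emit_compare_and_swap below: both flavours perform a
     // sequentially consistent CAS (acquire and release both set, with a
     // trailing AnyAny barrier) and leave rscratch1 == 0 on success and
     // 1 on failure (cset of the NE condition produced by cmpxchg).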
1532 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1533   __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1534   __ cset(rscratch1, Assembler::NE);
1535   __ membar(__ AnyAny);
1536 }
1537 
1538 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1539   __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1540   __ cset(rscratch1, Assembler::NE);
1541   __ membar(__ AnyAny);
1542 }
1543 
1544 
1545 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1546   assert(VM_Version::supports_cx8(), "wrong machine");
1547   Register addr;
1548   if (op->addr()->is_register()) {
1549     addr = as_reg(op->addr());
1550   } else {
1551     assert(op->addr()->is_address(), "what else?");
1552     LIR_Address* addr_ptr = op->addr()->as_address_ptr();
1553     assert(addr_ptr->disp() == 0, "need 0 disp");
1554     assert(addr_ptr->index() == LIR_OprDesc::illegalOpr(), "need 0 index");
1555     addr = as_reg(addr_ptr->base());
1556   }
1557   Register newval = as_reg(op->new_value());
1558   Register cmpval = as_reg(op->cmp_value());
1559 
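       // With compressed oops an object CAS operates on 32-bit narrow
       // oops, so both values are encoded into temporaries first and the
       // word-sized casw is used; otherwise the full 64-bit casl applies.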
1560   if (op->code() == lir_cas_obj) {
1561     if (UseCompressedOops) {
1562       Register t1 = op->tmp1()->as_register();
1563       assert(op->tmp1()->is_valid(), "must be");
1564       __ encode_heap_oop(t1, cmpval);
1565       cmpval = t1;
1566       __ encode_heap_oop(rscratch2, newval);
1567       newval = rscratch2;
1568       casw(addr, newval, cmpval);
1569     } else {
1570       casl(addr, newval, cmpval);
1571     }
1572   } else if (op->code() == lir_cas_int) {
1573     casw(addr, newval, cmpval);
1574   } else {
1575     casl(addr, newval, cmpval);
1576   }
1577 }
1578 
1579 
1580 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
1581 
1582   Assembler::Condition acond, ncond;
1583   switch (condition) {
1584   case lir_cond_equal:        acond = Assembler::EQ; ncond = Assembler::NE; break;
1585   case lir_cond_notEqual:     acond = Assembler::NE; ncond = Assembler::EQ; break;
1586   case lir_cond_less:         acond = Assembler::LT; ncond = Assembler::GE; break;
1587   case lir_cond_lessEqual:    acond = Assembler::LE; ncond = Assembler::GT; break;
1588   case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;
1589   case lir_cond_greater:      acond = Assembler::GT; ncond = Assembler::LE; break;
1590   case lir_cond_belowEqual:
1591   case lir_cond_aboveEqual:
1592   default:                    ShouldNotReachHere();
1593     acond = Assembler::EQ; ncond = Assembler::NE;  // unreachable
1594   }
1595 
1596   assert(result->is_single_cpu() || result->is_double_cpu(),
1597          "expect single register for result");
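       // When the two sources are the int (or long) constants 0 and 1,
       // the conditional move collapses to a single cset: "cond ? 1 : 0"
       // becomes cset(result, acond), with no inputs to materialize.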
1598   if (opr1->is_constant() && opr2->is_constant()
1599       && opr1->type() == T_INT && opr2->type() == T_INT) {
1600     jint val1 = opr1->as_jint();
1601     jint val2 = opr2->as_jint();
1602     if (val1 == 0 && val2 == 1) {
1603       __ cset(result->as_register(), ncond);
1604       return;
1605     } else if (val1 == 1 && val2 == 0) {
1606       __ cset(result->as_register(), acond);
1607       return;
1608     }
1609   }
1610 
1611   if (opr1->is_constant() && opr2->is_constant()
1612       && opr1->type() == T_LONG && opr2->type() == T_LONG) {
1613     jlong val1 = opr1->as_jlong();
1614     jlong val2 = opr2->as_jlong();
1615     if (val1 == 0 && val2 == 1) {
1616       __ cset(result->as_register_lo(), ncond);
1617       return;
1618     } else if (val1 == 1 && val2 == 0) {
1619       __ cset(result->as_register_lo(), acond);
1620       return;
1621     }
1622   }
1623 
1624   if (opr1->is_stack()) {
1625     stack2reg(opr1, FrameMap::rscratch1_opr, result->type());
1626     opr1 = FrameMap::rscratch1_opr;
1627   } else if (opr1->is_constant()) {
1628     LIR_Opr tmp
1629       = opr1->type() == T_LONG ? FrameMap::rscratch1_long_opr : FrameMap::rscratch1_opr;
1630     const2reg(opr1, tmp, lir_patch_none, NULL);
1631     opr1 = tmp;
1632   }
1633 
1634   if (opr2->is_stack()) {
1635     stack2reg(opr2, FrameMap::rscratch2_opr, result->type());
1636     opr2 = FrameMap::rscratch2_opr;
1637   } else if (opr2->is_constant()) {
1638     LIR_Opr tmp
1639       = opr2->type() == T_LONG ? FrameMap::rscratch2_long_opr : FrameMap::rscratch2_opr;
1640     const2reg(opr2, tmp, lir_patch_none, NULL);
1641     opr2 = tmp;
1642   }
1643 
1644   if (result->type() == T_LONG)
1645     __ csel(result->as_register_lo(), opr1->as_register_lo(), opr2->as_register_lo(), acond);
1646   else
1647     __ csel(result->as_register(), opr1->as_register(), opr2->as_register(), acond);
1648 }
1649 
1650 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
1651   assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
1652 
1653   if (left->is_single_cpu()) {
1654     Register lreg = left->as_register();
1655     Register dreg = as_reg(dest);
1656 
1657     if (right->is_single_cpu()) {
1658       // cpu register - cpu register
1659 
1660       assert(left->type() == T_INT && right->type() == T_INT && dest->type() == T_INT,
1661              "should be");
1662       Register rreg = right->as_register();
1663       switch (code) {
1664       case lir_add: __ addw (dest->as_register(), lreg, rreg); break;
1665       case lir_sub: __ subw (dest->as_register(), lreg, rreg); break;
1666       case lir_mul: __ mulw (dest->as_register(), lreg, rreg); break;
1667       default:      ShouldNotReachHere();
1668       }
1669 
1670     } else if (right->is_double_cpu()) {
1671       Register rreg = right->as_register_lo();
1672       // single_cpu + double_cpu: can happen with obj+long
1673       assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
1674       switch (code) {
1675       case lir_add: __ add(dreg, lreg, rreg); break;
1676       case lir_sub: __ sub(dreg, lreg, rreg); break;
1677       default: ShouldNotReachHere();
1678       }
1679     } else if (right->is_constant()) {
1680       // cpu register - constant
1681       jlong c;
1682 
1683       // FIXME.  This is fugly: we really need to factor all this logic.
1684       switch(right->type()) {
1685       case T_LONG:
1686         c = right->as_constant_ptr()->as_jlong();
1687         break;
1688       case T_INT:
1689       case T_ADDRESS:
1690         c = right->as_constant_ptr()->as_jint();
1691         break;
1692       default:
1693         ShouldNotReachHere();
1694         c = 0;  // unreachable
1695         break;
1696       }
1697 
1698       assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
1699       if (c == 0 && dreg == lreg) {
1700         COMMENT("effective nop elided");
1701         return;
1702       }
1703       switch(left->type()) {
1704       case T_INT:
1705         switch (code) {
1706         case lir_add: __ addw(dreg, lreg, c); break;
1707         case lir_sub: __ subw(dreg, lreg, c); break;
1708         default: ShouldNotReachHere();
1709         }
1710         break;
1711       case T_OBJECT:
1712       case T_ADDRESS:
1713         switch (code) {
1714         case lir_add: __ add(dreg, lreg, c); break;
1715         case lir_sub: __ sub(dreg, lreg, c); break;
1716         default: ShouldNotReachHere();
1717         }
1718         break;
1719       default:
1720         ShouldNotReachHere();
1721       }
1722     } else {
1723       ShouldNotReachHere();
1724     }
1725 
1726   } else if (left->is_double_cpu()) {
1727     Register lreg_lo = left->as_register_lo();
1728 
1729     if (right->is_double_cpu()) {
1730       // cpu register - cpu register
1731       Register rreg_lo = right->as_register_lo();
1732       switch (code) {
1733       case lir_add: __ add (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1734       case lir_sub: __ sub (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1735       case lir_mul: __ mul (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1736       case lir_div: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, false, rscratch1); break;
1737       case lir_rem: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, true, rscratch1); break;
1738       default:
1739         ShouldNotReachHere();
1740       }
1741 
1742     } else if (right->is_constant()) {
1743       jlong c = right->as_constant_ptr()->as_jlong();
1744       Register dreg = as_reg(dest);
1745       switch (code) {
1746         case lir_add:
1747         case lir_sub:
1748           if (c == 0 && dreg == lreg_lo) {
1749             COMMENT("effective nop elided");
1750             return;
1751           }
1752           code == lir_add ? __ add(dreg, lreg_lo, c) : __ sub(dreg, lreg_lo, c);
1753           break;
1754         case lir_div:
1755           assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
1756           if (c == 1) {
1757             // move lreg_lo to dreg if divisor is 1
1758             __ mov(dreg, lreg_lo);
1759           } else {
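                 // Signed division by 2^shift must round toward zero, so
                 // bias negative dividends by (c - 1) first: asr(.., 63)
                 // yields 0 or -1, and -1 logically shifted right by
                 // (64 - shift) is exactly c - 1. E.g. for c == 8,
                 // -9 becomes (-9 + 7) >> 3 == -1 rather than the floored -2.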
1760             unsigned int shift = log2i_exact(c);
1761             // use rscratch1 as intermediate result register
1762             __ asr(rscratch1, lreg_lo, 63);
1763             __ add(rscratch1, lreg_lo, rscratch1, Assembler::LSR, 64 - shift);
1764             __ asr(dreg, rscratch1, shift);
1765           }
1766           break;
1767         case lir_rem:
1768           assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
1769           if (c == 1) {
1770             // move 0 to dreg if divisor is 1
1771             __ mov(dreg, zr);
1772           } else {
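                 // Signed remainder rounding toward zero: mask both x and
                 // -x with (c - 1), then csneg keyed on the sign of -x (MI)
                 // picks x & (c - 1) for positive x and -((-x) & (c - 1))
                 // otherwise, e.g. -9 rem 8 == -(9 & 7) == -1.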
1773             // use rscratch1 as intermediate result register
1774             __ negs(rscratch1, lreg_lo);
1775             __ andr(dreg, lreg_lo, c - 1);
1776             __ andr(rscratch1, rscratch1, c - 1);
1777             __ csneg(dreg, dreg, rscratch1, Assembler::MI);
1778           }
1779           break;
1780         default:
1781           ShouldNotReachHere();
1782       }
1783     } else {
1784       ShouldNotReachHere();
1785     }
1786   } else if (left->is_single_fpu()) {
1787     assert(right->is_single_fpu(), "right hand side of float arithmetics needs to be float register");
1788     switch (code) {
1789     case lir_add: __ fadds (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1790     case lir_sub: __ fsubs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1791     case lir_mul: __ fmuls (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1792     case lir_div: __ fdivs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1793     default:
1794       ShouldNotReachHere();
1795     }
1796   } else if (left->is_double_fpu()) {
1797     if (right->is_double_fpu()) {
1798       // fpu register - fpu register
1799       switch (code) {
1800       case lir_add: __ faddd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1801       case lir_sub: __ fsubd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1802       case lir_mul: __ fmuld (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1803       case lir_div: __ fdivd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1804       default:
1805         ShouldNotReachHere();
1806       }
1807     } else {
1808       if (right->is_constant()) {
1809         ShouldNotReachHere();
1810       }
1811       ShouldNotReachHere();
1812     }
1813   } else if (left->is_single_stack() || left->is_address()) {
1814     assert(left == dest, "left and dest must be equal");
1815     ShouldNotReachHere();
1816   } else {
1817     ShouldNotReachHere();
1818   }
1819 }
1820 
1821 void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) { Unimplemented(); }
1822 
1823 
1824 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
1825   switch(code) {
1826   case lir_abs : __ fabsd(dest->as_double_reg(), value->as_double_reg()); break;
1827   case lir_sqrt: __ fsqrtd(dest->as_double_reg(), value->as_double_reg()); break;
1828   default      : ShouldNotReachHere();
1829   }
1830 }
1831 
1832 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1833 
1834   assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");
1835   Register Rleft = left->is_single_cpu() ? left->as_register() :
1836                                            left->as_register_lo();
1837    if (dst->is_single_cpu()) {
1838      Register Rdst = dst->as_register();
1839      if (right->is_constant()) {
1840        switch (code) {
1841          case lir_logic_and: __ andw (Rdst, Rleft, right->as_jint()); break;
1842          case lir_logic_or:  __ orrw (Rdst, Rleft, right->as_jint()); break;
1843          case lir_logic_xor: __ eorw (Rdst, Rleft, right->as_jint()); break;
1844          default: ShouldNotReachHere(); break;
1845        }
1846      } else {
1847        Register Rright = right->is_single_cpu() ? right->as_register() :
1848                                                   right->as_register_lo();
1849        switch (code) {
1850          case lir_logic_and: __ andw (Rdst, Rleft, Rright); break;
1851          case lir_logic_or:  __ orrw (Rdst, Rleft, Rright); break;
1852          case lir_logic_xor: __ eorw (Rdst, Rleft, Rright); break;
1853          default: ShouldNotReachHere(); break;
1854        }
1855      }
1856    } else {
1857      Register Rdst = dst->as_register_lo();
1858      if (right->is_constant()) {
1859        switch (code) {
1860          case lir_logic_and: __ andr (Rdst, Rleft, right->as_jlong()); break;
1861          case lir_logic_or:  __ orr (Rdst, Rleft, right->as_jlong()); break;
1862          case lir_logic_xor: __ eor (Rdst, Rleft, right->as_jlong()); break;
1863          default: ShouldNotReachHere(); break;
1864        }
1865      } else {
1866        Register Rright = right->is_single_cpu() ? right->as_register() :
1867                                                   right->as_register_lo();
1868        switch (code) {
1869          case lir_logic_and: __ andr (Rdst, Rleft, Rright); break;
1870          case lir_logic_or:  __ orr (Rdst, Rleft, Rright); break;
1871          case lir_logic_xor: __ eor (Rdst, Rleft, Rright); break;
1872          default: ShouldNotReachHere(); break;
1873        }
1874      }
1875    }
1876 }
1877 
1878 
1879 
1880 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr illegal, LIR_Opr result, CodeEmitInfo* info) {
1881 
1882   // opcode check
1883   assert((code == lir_idiv) || (code == lir_irem), "opcode must be idiv or irem");
1884   bool is_irem = (code == lir_irem);
1885 
1886   // operand check
1887   assert(left->is_single_cpu(),   "left must be register");
1888   assert(right->is_single_cpu() || right->is_constant(),  "right must be register or constant");
1889   assert(result->is_single_cpu(), "result must be register");
1890   Register lreg = left->as_register();
1891   Register dreg = result->as_register();
1892 
1893   // power-of-2 constant check and codegen
1894   if (right->is_constant()) {
1895     int c = right->as_constant_ptr()->as_jint();
1896     assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
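         // Same round-toward-zero power-of-two sequences as the 64-bit
         // cases in arith_op, on 32-bit registers. E.g. -7 / 4:
         // (-7 + 3) >> 2 == -1, where a plain arithmetic shift gives -2.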
1897     if (is_irem) {
1898       if (c == 1) {
1899         // move 0 to dreg if divisor is 1
1900         __ movw(dreg, zr);
1901       } else {
1902         // use rscratch1 as intermediate result register
1903         __ negsw(rscratch1, lreg);
1904         __ andw(dreg, lreg, c - 1);
1905         __ andw(rscratch1, rscratch1, c - 1);
1906         __ csnegw(dreg, dreg, rscratch1, Assembler::MI);
1907       }
1908     } else {
1909       if (c == 1) {
1910         // move lreg to dreg if divisor is 1
1911         __ movw(dreg, lreg);
1912       } else {
1913         unsigned int shift = exact_log2(c);
1914         // use rscratch1 as intermediate result register
1915         __ asrw(rscratch1, lreg, 31);
1916         __ addw(rscratch1, lreg, rscratch1, Assembler::LSR, 32 - shift);
1917         __ asrw(dreg, rscratch1, shift);
1918       }
1919     }
1920   } else {
1921     Register rreg = right->as_register();
1922     __ corrected_idivl(dreg, lreg, rreg, is_irem, rscratch1);
1923   }
1924 }
1925 
1926 
1927 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1928   if (opr1->is_constant() && opr2->is_single_cpu()) {
1929     // tableswitch
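         // A constant opr1 selects a previously recorded tableswitch
         // descriptor from the switches array (see the tableswitch
         // peephole at the end of this file).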
1930     Register reg = as_reg(opr2);
1931     struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
1932     __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
1933   } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
1934     Register reg1 = as_reg(opr1);
1935     if (opr2->is_single_cpu()) {
1936       // cpu register - cpu register
1937       Register reg2 = opr2->as_register();
1938       if (is_reference_type(opr1->type())) {
1939         __ cmpoop(reg1, reg2);
1940       } else {
1941         assert(!is_reference_type(opr2->type()), "cmp int, oop?");
1942         __ cmpw(reg1, reg2);
1943       }
1944       return;
1945     }
1946     if (opr2->is_double_cpu()) {
1947       // cpu register - cpu register
1948       Register reg2 = opr2->as_register_lo();
1949       __ cmp(reg1, reg2);
1950       return;
1951     }
1952 
1953     if (opr2->is_constant()) {
1954       bool is_32bit = false; // width of register operand
1955       jlong imm;
1956 
1957       switch(opr2->type()) {
1958       case T_INT:
1959         imm = opr2->as_constant_ptr()->as_jint();
1960         is_32bit = true;
1961         break;
1962       case T_LONG:
1963         imm = opr2->as_constant_ptr()->as_jlong();
1964         break;
1965       case T_ADDRESS:
1966         imm = opr2->as_constant_ptr()->as_jint();
1967         break;
1968       case T_METADATA:
1969         imm = (intptr_t)(opr2->as_constant_ptr()->as_metadata());
1970         break;
1971       case T_OBJECT:
1972       case T_ARRAY:
1973         jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
1974         __ cmpoop(reg1, rscratch1);
1975         return;
1976       default:
1977         ShouldNotReachHere();
1978         imm = 0;  // unreachable
1979         break;
1980       }
1981 
1982       if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
1983         if (is_32bit)
1984           __ cmpw(reg1, imm);
1985         else
1986           __ subs(zr, reg1, imm);
1987         return;
1988       } else {
1989         __ mov(rscratch1, imm);
1990         if (is_32bit)
1991           __ cmpw(reg1, rscratch1);
1992         else
1993           __ cmp(reg1, rscratch1);
1994         return;
1995       }
1996     } else
1997       ShouldNotReachHere();
1998   } else if (opr1->is_single_fpu()) {
1999     FloatRegister reg1 = opr1->as_float_reg();
2000     assert(opr2->is_single_fpu(), "expect single float register");
2001     FloatRegister reg2 = opr2->as_float_reg();
2002     __ fcmps(reg1, reg2);
2003   } else if (opr1->is_double_fpu()) {
2004     FloatRegister reg1 = opr1->as_double_reg();
2005     assert(opr2->is_double_fpu(), "expect double float register");
2006     FloatRegister reg2 = opr2->as_double_reg();
2007     __ fcmpd(reg1, reg2);
2008   } else {
2009     ShouldNotReachHere();
2010   }
2011 }
2012 
2013 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
2014   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2015     bool is_unordered_less = (code == lir_ucmp_fd2i);
2016     if (left->is_single_fpu()) {
2017       __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
2018     } else if (left->is_double_fpu()) {
2019       __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
2020     } else {
2021       ShouldNotReachHere();
2022     }
2023   } else if (code == lir_cmp_l2i) {
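         // Three-way long compare: dst stays -1 when left < right;
         // otherwise csinc(dst, zr, zr, EQ) yields 0 when equal and
         // zr + 1 == 1 when greater.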
2024     Label done;
2025     __ cmp(left->as_register_lo(), right->as_register_lo());
2026     __ mov(dst->as_register(), (uint64_t)-1L);
2027     __ br(Assembler::LT, done);
2028     __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
2029     __ bind(done);
2030   } else {
2031     ShouldNotReachHere();
2032   }
2033 }
2034 
2035 
2036 void LIR_Assembler::align_call(LIR_Code code) {  }
2037 
2038 
2039 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2040   address call = __ trampoline_call(Address(op->addr(), rtype));
2041   if (call == NULL) {
2042     bailout("trampoline stub overflow");
2043     return;
2044   }
2045   add_call_info(code_offset(), op->info());
2046 }
2047 
2048 
2049 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2050   address call = __ ic_call(op->addr());
2051   if (call == NULL) {
2052     bailout("trampoline stub overflow");
2053     return;
2054   }
2055   add_call_info(code_offset(), op->info());
2056 }
2057 
2058 void LIR_Assembler::emit_static_call_stub() {
2059   address call_pc = __ pc();
2060   address stub = __ start_a_stub(call_stub_size());
2061   if (stub == NULL) {
2062     bailout("static call stub overflow");
2063     return;
2064   }
2065 
2066   int start = __ offset();
2067 
2068   __ relocate(static_stub_Relocation::spec(call_pc));
2069   __ emit_static_call_stub();
2070 
2071   assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size()
2072         <= call_stub_size(), "stub too big");
2073   __ end_a_stub();
2074 }
2075 
2076 
2077 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2078   assert(exceptionOop->as_register() == r0, "must match");
2079   assert(exceptionPC->as_register() == r3, "must match");
2080 
2081   // exception object is not added to oop map by LinearScan
2082   // (LinearScan assumes that no oops are in fixed registers)
2083   info->add_register_oop(exceptionOop);
2084   Runtime1::StubID unwind_id;
2085 
2086   // get current pc information
2087   // pc is only needed if the method has an exception handler, the unwind code does not need it.
2088   if (compilation()->debug_info_recorder()->last_pc_offset() == __ offset()) {
2089     // As no instructions have been generated yet for this LIR node it's
2090     // possible that an oop map already exists for the current offset.
2091     // In that case insert a dummy NOP here to ensure all oop map PCs
2092     // are unique. See JDK-8237483.
2093     __ nop();
2094   }
2095   int pc_for_athrow_offset = __ offset();
2096   InternalAddress pc_for_athrow(__ pc());
2097   __ adr(exceptionPC->as_register(), pc_for_athrow);
2098   add_call_info(pc_for_athrow_offset, info); // for exception handler
2099 
2100   __ verify_not_null_oop(r0);
2101   // search an exception handler (r0: exception oop, r3: throwing pc)
2102   if (compilation()->has_fpu_code()) {
2103     unwind_id = Runtime1::handle_exception_id;
2104   } else {
2105     unwind_id = Runtime1::handle_exception_nofpu_id;
2106   }
2107   __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
2108 
2109   // FIXME: is there enough room here for a two-byte trap?
2110   __ nop();
2111 }
2112 
2113 
2114 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
2115   assert(exceptionOop->as_register() == r0, "must match");
2116 
2117   __ b(_unwind_handler_entry);
2118 }
2119 
2120 
2121 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2122   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2123   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2124 
2125   switch (left->type()) {
2126     case T_INT: {
2127       switch (code) {
2128       case lir_shl:  __ lslvw (dreg, lreg, count->as_register()); break;
2129       case lir_shr:  __ asrvw (dreg, lreg, count->as_register()); break;
2130       case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
2131       default:
2132         ShouldNotReachHere();
2133         break;
2134       }
2135       break;
2136     case T_LONG:
2137     case T_ADDRESS:
2138     case T_OBJECT:
2139       switch (code) {
2140       case lir_shl:  __ lslv (dreg, lreg, count->as_register()); break;
2141       case lir_shr:  __ asrv (dreg, lreg, count->as_register()); break;
2142       case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
2143       default:
2144         ShouldNotReachHere();
2145         break;
2146       }
2147       break;
2148     default:
2149       ShouldNotReachHere();
2150       break;
2151     }
2152   }
2153 }
2154 
2155 
2156 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2157   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2158   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2159 
2160   switch (left->type()) {
2161     case T_INT: {
2162       switch (code) {
2163       case lir_shl:  __ lslw (dreg, lreg, count); break;
2164       case lir_shr:  __ asrw (dreg, lreg, count); break;
2165       case lir_ushr: __ lsrw (dreg, lreg, count); break;
2166       default:
2167         ShouldNotReachHere();
2168         break;
2169       }
2170       break;
2171     case T_LONG:
2172     case T_ADDRESS:
2173     case T_OBJECT:
2174       switch (code) {
2175       case lir_shl:  __ lsl (dreg, lreg, count); break;
2176       case lir_shr:  __ asr (dreg, lreg, count); break;
2177       case lir_ushr: __ lsr (dreg, lreg, count); break;
2178       default:
2179         ShouldNotReachHere();
2180         break;
2181       }
2182       break;
2183     default:
2184       ShouldNotReachHere();
2185       break;
2186     }
2187   }
2188 }
2189 
2190 
2191 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2192   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2193   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2194   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2195   __ str (r, Address(sp, offset_from_rsp_in_bytes));
2196 }
2197 
2198 
2199 void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2200   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2201   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2202   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2203   __ mov (rscratch1, c);
2204   __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2205 }
2206 
2207 
2208 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
2209   ShouldNotReachHere();
2210   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2211   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2212   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2213   __ lea(rscratch1, __ constant_oop_address(o));
2214   __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2215 }
2216 
2217 
2218 // This code replaces a call to arraycopy; no exceptions may be
2219 // thrown in this code: they must be thrown in the System.arraycopy
2220 // activation frame. We could save some checks if this were not the case.
2221 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2222   ciArrayKlass* default_type = op->expected_type();
2223   Register src = op->src()->as_register();
2224   Register dst = op->dst()->as_register();
2225   Register src_pos = op->src_pos()->as_register();
2226   Register dst_pos = op->dst_pos()->as_register();
2227   Register length  = op->length()->as_register();
2228   Register tmp = op->tmp()->as_register();
2229 
2230   CodeStub* stub = op->stub();
2231   int flags = op->flags();
2232   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2233   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2234 
2235   // if we don't know anything, just go through the generic arraycopy
2236   if (default_type == NULL // || basic_type == T_OBJECT
2237       ) {
2238     Label done;
2239     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2240 
2241     // Save the arguments in case the generic arraycopy fails and we
2242     // have to fall back to the JNI stub
2243     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2244     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2245     __ str(src,              Address(sp, 4*BytesPerWord));
2246 
2247     address copyfunc_addr = StubRoutines::generic_arraycopy();
2248     assert(copyfunc_addr != NULL, "generic arraycopy stub required");
2249 
2250     // The arguments are in java calling convention so we shift them
2251     // to C convention
2252     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2253     __ mov(c_rarg0, j_rarg0);
2254     assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2255     __ mov(c_rarg1, j_rarg1);
2256     assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
2257     __ mov(c_rarg2, j_rarg2);
2258     assert_different_registers(c_rarg3, j_rarg4);
2259     __ mov(c_rarg3, j_rarg3);
2260     __ mov(c_rarg4, j_rarg4);
2261 #ifndef PRODUCT
2262     if (PrintC1Statistics) {
2263       __ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
2264     }
2265 #endif
2266     __ far_call(RuntimeAddress(copyfunc_addr));
2267 
2268     __ cbz(r0, *stub->continuation());
2269 
2270     // Reload values from the stack so they are where the stub
2271     // expects them.
2272     __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2273     __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2274     __ ldr(src,              Address(sp, 4*BytesPerWord));
2275 
2276     // r0 is -1^K where K == partial copied count
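         // eonw(r0, zr) computes r0 ^ ~0 == ~r0, recovering K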
2277     __ eonw(rscratch1, r0, zr);
2278     // adjust length down and src/end pos up by partial copied count
2279     __ subw(length, length, rscratch1);
2280     __ addw(src_pos, src_pos, rscratch1);
2281     __ addw(dst_pos, dst_pos, rscratch1);
2282     __ b(*stub->entry());
2283 
2284     __ bind(*stub->continuation());
2285     return;
2286   }
2287 
2288   assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2289 
2290   int elem_size = type2aelembytes(basic_type);
2291   int scale = exact_log2(elem_size);
2292 
2293   Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2294   Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2295   Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
2296   Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
2297 
2298   // test for NULL
2299   if (flags & LIR_OpArrayCopy::src_null_check) {
2300     __ cbz(src, *stub->entry());
2301   }
2302   if (flags & LIR_OpArrayCopy::dst_null_check) {
2303     __ cbz(dst, *stub->entry());
2304   }
2305 
2306   // If the compiler was not able to prove that exact type of the source or the destination
2307   // of the arraycopy is an array type, check at runtime if the source or the destination is
2308   // an instance type.
2309   if (flags & LIR_OpArrayCopy::type_check) {
2310     if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2311       __ load_klass(tmp, dst);
2312       __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2313       __ cmpw(rscratch1, Klass::_lh_neutral_value);
2314       __ br(Assembler::GE, *stub->entry());
2315     }
2316 
2317     if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2318       __ load_klass(tmp, src);
2319       __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2320       __ cmpw(rscratch1, Klass::_lh_neutral_value);
2321       __ br(Assembler::GE, *stub->entry());
2322     }
2323   }
2324 
2325   // check if negative
2326   if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
2327     __ cmpw(src_pos, 0);
2328     __ br(Assembler::LT, *stub->entry());
2329   }
2330   if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
2331     __ cmpw(dst_pos, 0);
2332     __ br(Assembler::LT, *stub->entry());
2333   }
2334 
2335   if (flags & LIR_OpArrayCopy::length_positive_check) {
2336     __ cmpw(length, 0);
2337     __ br(Assembler::LT, *stub->entry());
2338   }
2339 
2340   if (flags & LIR_OpArrayCopy::src_range_check) {
2341     __ addw(tmp, src_pos, length);
2342     __ ldrw(rscratch1, src_length_addr);
2343     __ cmpw(tmp, rscratch1);
2344     __ br(Assembler::HI, *stub->entry());
2345   }
2346   if (flags & LIR_OpArrayCopy::dst_range_check) {
2347     __ addw(tmp, dst_pos, length);
2348     __ ldrw(rscratch1, dst_length_addr);
2349     __ cmpw(tmp, rscratch1);
2350     __ br(Assembler::HI, *stub->entry());
2351   }
2352 
2353   if (flags & LIR_OpArrayCopy::type_check) {
2354     // We don't know the array types are compatible
2355     if (basic_type != T_OBJECT) {
2356       // Simple test for basic type arrays
2357       if (UseCompressedClassPointers) {
2358         __ load_nklass(tmp, src);
2359         __ load_nklass(rscratch1, dst);
2360         __ cmpw(tmp, rscratch1);
2361       } else {
2362         __ ldr(tmp, Address(src, oopDesc::klass_offset_in_bytes()));
2363         __ ldr(rscratch1, Address(dst, oopDesc::klass_offset_in_bytes()));
2364         __ cmp(tmp, rscratch1);
2365       }
2366       __ br(Assembler::NE, *stub->entry());
2367     } else {
2368       // For object arrays, if src is a sub class of dst then we can
2369       // safely do the copy.
2370       Label cont, slow;
2371 
2372 #define PUSH(r1, r2)                                    \
2373       stp(r1, r2, __ pre(sp, -2 * wordSize));
2374 
2375 #define POP(r1, r2)                                     \
2376       ldp(r1, r2, __ post(sp, 2 * wordSize));
2377 
2378       __ PUSH(src, dst);
2379 
2380       __ load_klass(src, src);
2381       __ load_klass(dst, dst);
2382 
2383       __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
2384 
2385       __ PUSH(src, dst);
2386       __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
2387       __ POP(src, dst);
2388 
2389       __ cbnz(src, cont);
2390 
2391       __ bind(slow);
2392       __ POP(src, dst);
2393 
2394       address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2395       if (copyfunc_addr != NULL) { // use stub if available
2396         // src is not a sub class of dst so we have to do a
2397         // per-element check.
2398 
2399         int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2400         if ((flags & mask) != mask) {
2401           // Both must be object arrays; one is statically known to be, so runtime-check the other.
2402           assert(flags & mask, "one of the two should be known to be an object array");
2403 
2404           if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2405             __ load_klass(tmp, src);
2406           } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2407             __ load_klass(tmp, dst);
2408           }
2409           int lh_offset = in_bytes(Klass::layout_helper_offset());
2410           Address klass_lh_addr(tmp, lh_offset);
2411           jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2412           __ ldrw(rscratch1, klass_lh_addr);
2413           __ mov(rscratch2, objArray_lh);
2414           __ eorw(rscratch1, rscratch1, rscratch2);
2415           __ cbnzw(rscratch1, *stub->entry());
2416         }
2417 
2418         // Spill because stubs can use any register they like and it's
2419         // easier to restore just those that we care about.
2420         __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2421         __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2422         __ str(src,              Address(sp, 4*BytesPerWord));
2423 
2424         __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2425         __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2426         assert_different_registers(c_rarg0, dst, dst_pos, length);
2427         __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2428         __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
2429         assert_different_registers(c_rarg1, dst, length);
2430         __ uxtw(c_rarg2, length);
2431         assert_different_registers(c_rarg2, dst);
2432 
2433         __ load_klass(c_rarg4, dst);
2434         __ ldr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
2435         __ ldrw(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
2436         __ far_call(RuntimeAddress(copyfunc_addr));
2437 
2438 #ifndef PRODUCT
2439         if (PrintC1Statistics) {
2440           Label failed;
2441           __ cbnz(r0, failed);
2442           __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
2443           __ bind(failed);
2444         }
2445 #endif
2446 
2447         __ cbz(r0, *stub->continuation());
2448 
2449 #ifndef PRODUCT
2450         if (PrintC1Statistics) {
2451           __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
2452         }
2453 #endif
2454         assert_different_registers(dst, dst_pos, length, src_pos, src, r0, rscratch1);
2455 
2456         // Restore previously spilled arguments
2457         __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2458         __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2459         __ ldr(src,              Address(sp, 4*BytesPerWord));
2460 
2461         // return value is -1^K where K is partial copied count
2462         __ eonw(rscratch1, r0, zr);
2463         // adjust length down and src/end pos up by partial copied count
2464         __ subw(length, length, rscratch1);
2465         __ addw(src_pos, src_pos, rscratch1);
2466         __ addw(dst_pos, dst_pos, rscratch1);
2467       }
2468 
2469       __ b(*stub->entry());
2470 
2471       __ bind(cont);
2472       __ POP(src, dst);
2473     }
2474   }
2475 
2476 #ifdef ASSERT
2477   if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2478     // Sanity check the known type with the incoming class.  For the
2479     // primitive case the types must match exactly with src.klass and
2480     // dst.klass each exactly matching the default type.  For the
2481     // object array case, if no type check is needed then either the
2482     // dst type is exactly the expected type and the src type is a
2483     // subtype which we can't check or src is the same array as dst
2484     // but not necessarily exactly of type default_type.
2485     Label known_ok, halt;
2486     __ mov_metadata(tmp, default_type->constant_encoding());
2487 
2488     if (basic_type != T_OBJECT) {
2489       __ cmp_klass(dst, tmp, rscratch1);
2490       __ br(Assembler::NE, halt);
2491       __ cmp_klass(src, tmp, rscratch1);
2492       __ br(Assembler::EQ, known_ok);
2493     } else {
2494       __ cmp_klass(dst, tmp, rscratch1);
2495       __ br(Assembler::EQ, known_ok);
2496       __ cmp(src, dst);
2497       __ br(Assembler::EQ, known_ok);
2498     }
2499     __ bind(halt);
2500     __ stop("incorrect type information in arraycopy");
2501     __ bind(known_ok);
2502   }
2503 #endif
2504 
2505 #ifndef PRODUCT
2506   if (PrintC1Statistics) {
2507     __ incrementw(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
2508   }
2509 #endif
2510 
2511   __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2512   __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2513   assert_different_registers(c_rarg0, dst, dst_pos, length);
2514   __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2515   __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
2516   assert_different_registers(c_rarg1, dst, length);
2517   __ uxtw(c_rarg2, length);
2518   assert_different_registers(c_rarg2, dst);
2519 
2520   bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2521   bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
2522   const char *name;
2523   address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2524 
2525   CodeBlob *cb = CodeCache::find_blob(entry);
2526   if (cb) {
2527     __ far_call(RuntimeAddress(entry));
2528   } else {
2529     __ call_VM_leaf(entry, 3);
2530   }
2531 
2532   __ bind(*stub->continuation());
2533 }
2534 
2535 
2536 
2537 
2538 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2539   Register obj = op->obj_opr()->as_register();  // may not be an oop
2540   Register hdr = op->hdr_opr()->as_register();
2541   Register lock = op->lock_opr()->as_register();
2542   if (LockingMode == LM_MONITOR) {
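         // LM_MONITOR: always take the slow path through the runtime stub;
         // only the implicit null check is emitted inline.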
2543     if (op->info() != NULL) {
2544       add_debug_info_for_null_check_here(op->info());
2545       __ null_check(obj, -1);
2546     }
2547     __ b(*op->stub()->entry());
2548   } else if (op->code() == lir_lock) {
2549     Register scratch = noreg;
2550     if (UseBiasedLocking) {
2551       scratch = op->scratch_opr()->as_register();
2552     }
2553     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2554     // add debug info for NullPointerException only if one is possible
2555     int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
2556     if (op->info() != NULL) {
2557       add_debug_info_for_null_check(null_check_offset, op->info());
2558     }
2559     // done
2560   } else if (op->code() == lir_unlock) {
2561     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2562     __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2563   } else {
2564     Unimplemented();
2565   }
2566   __ bind(*op->stub()->continuation());
2567 }
2568 
2569 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
2570   Register obj = op->obj()->as_pointer_register();
2571   Register result = op->result_opr()->as_pointer_register();
2572 
2573   CodeEmitInfo* info = op->info();
2574   if (info != NULL) {
2575     add_debug_info_for_null_check_here(info);
2576   }
2577 
2578   if (UseCompressedClassPointers) {
2579     if (UseCompactObjectHeaders) {
2580       // Check if we can take the (common) fast path, if obj is unlocked.
2581       __ ldr(result, Address(obj, oopDesc::mark_offset_in_bytes()));
2582       __ tst(result, markWord::monitor_value);
2583       __ br(Assembler::NE, *op->stub()->entry());
2584       __ bind(*op->stub()->continuation());
2585 
2586       // Shift to get proper narrow Klass*.
2587       __ lsr(result, result, markWord::klass_shift);
2588     } else {
2589       __ ldrw(result, Address (obj, oopDesc::klass_offset_in_bytes()));
2590     }
2591     __ decode_klass_not_null(result);
2592   } else {
2593     __ ldr(result, Address (obj, oopDesc::klass_offset_in_bytes()));
2594   }
2595 }
2596 
2597 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2598   ciMethod* method = op->profiled_method();
2599   int bci          = op->profiled_bci();
2600   ciMethod* callee = op->profiled_callee();
2601 
2602   // Update counter for all call types
2603   ciMethodData* md = method->method_data_or_null();
2604   assert(md != NULL, "Sanity");
2605   ciProfileData* data = md->bci_to_data(bci);
2606   assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
2607   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
2608   Register mdo  = op->mdo()->as_register();
2609   __ mov_metadata(mdo, md->constant_encoding());
2610   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
2611   // Perform additional virtual call profiling for invokevirtual and
2612   // invokeinterface bytecodes
2613   if (op->should_profile_receiver_type()) {
2614     assert(op->recv()->is_single_cpu(), "recv must be allocated");
2615     Register recv = op->recv()->as_register();
2616     assert_different_registers(mdo, recv);
2617     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
2618     ciKlass* known_klass = op->known_holder();
2619     if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
2620       // We know the type that will be seen at this call site; we can
2621       // statically update the MethodData* rather than needing to do
2622       // dynamic tests on the receiver type
2623 
2624       // NOTE: we should probably put a lock around this search to
2625       // avoid collisions with concurrent compilations
2626       ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
2627       uint i;
2628       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2629         ciKlass* receiver = vc_data->receiver(i);
2630         if (known_klass->equals(receiver)) {
2631           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
2632           __ addptr(data_addr, DataLayout::counter_increment);
2633           return;
2634         }
2635       }
2636 
2637       // Receiver type not found in profile data; select an empty slot
2638 
2639       // Note that this is less efficient than it should be because it
2640       // always does a write to the receiver part of the
2641       // VirtualCallData rather than just the first time
2642       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2643         ciKlass* receiver = vc_data->receiver(i);
2644         if (receiver == NULL) {
2645           Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
2646           __ mov_metadata(rscratch1, known_klass->constant_encoding());
2647           __ lea(rscratch2, recv_addr);
2648           __ str(rscratch1, Address(rscratch2));
2649           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
2650           __ addptr(data_addr, DataLayout::counter_increment);
2651           return;
2652         }
2653       }
2654     } else {
2655       __ load_klass(recv, recv);
2656       Label update_done;
2657       type_profile_helper(mdo, md, data, recv, &update_done);
2658       // Receiver did not match any saved receiver and there is no empty row for it.
2659       // Increment total counter to indicate polymorphic case.
2660       __ addptr(counter_addr, DataLayout::counter_increment);
2661 
2662       __ bind(update_done);
2663     }
2664   } else {
2665     // Static call
2666     __ addptr(counter_addr, DataLayout::counter_increment);
2667   }
2668 }
2669 
2670 
2671 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
2672   Unimplemented();
2673 }
2674 
2675 
2676 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
2677   __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
2678 }
2679 
2680 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
2681   assert(op->crc()->is_single_cpu(),  "crc must be register");
2682   assert(op->val()->is_single_cpu(),  "byte value must be register");
2683   assert(op->result_opr()->is_single_cpu(), "result must be register");
2684   Register crc = op->crc()->as_register();
2685   Register val = op->val()->as_register();
2686   Register res = op->result_opr()->as_register();
2687 
2688   assert_different_registers(val, crc, res);
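       // Table-driven CRC32 keeps its working state bit-inverted, so the
       // incoming crc is inverted on entry (mvnw) and inverted back on exit.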
2689   uint64_t offset;
2690   __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);
2691   if (offset) __ add(res, res, offset);
2692 
2693   __ mvnw(crc, crc); // ~crc
2694   __ update_byte_crc32(crc, val, res);
2695   __ mvnw(res, crc); // ~crc
2696 }
2697 
2698 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
2699   COMMENT("emit_profile_type {");
2700   Register obj = op->obj()->as_register();
2701   Register tmp = op->tmp()->as_pointer_register();
2702   Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
2703   ciKlass* exact_klass = op->exact_klass();
2704   intptr_t current_klass = op->current_klass();
2705   bool not_null = op->not_null();
2706   bool no_conflict = op->no_conflict();
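       // A type profile cell packs a Klass* together with status bits in
       // the low bits (TypeEntries::null_seen, TypeEntries::type_unknown),
       // hence the masking with type_klass_mask and the tbnz bit probes below.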
2707 
2708   Label update, next, none;
2709 
2710   bool do_null = !not_null;
2711   bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
2712   bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
2713 
2714   assert(do_null || do_update, "why are we here?");
2715   assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
2716   assert(mdo_addr.base() != rscratch1, "wrong register");
2717 
2718   __ verify_oop(obj);
2719 
2720   if (tmp != obj) {
2721     __ mov(tmp, obj);
2722   }
2723   if (do_null) {
2724     __ cbnz(tmp, update);
2725     if (!TypeEntries::was_null_seen(current_klass)) {
2726       __ ldr(rscratch2, mdo_addr);
2727       __ orr(rscratch2, rscratch2, TypeEntries::null_seen);
2728       __ str(rscratch2, mdo_addr);
2729     }
2730     if (do_update) {
2731 #ifndef ASSERT
2732       __ b(next);
2733     }
2734 #else
2735       __ b(next);
2736     }
2737   } else {
2738     __ cbnz(tmp, update);
2739     __ stop("unexpected null obj");
2740 #endif
2741   }
2742 
2743   __ bind(update);
2744 
2745   if (do_update) {
2746 #ifdef ASSERT
2747     if (exact_klass != NULL) {
2748       Label ok;
2749       __ load_klass(tmp, tmp);
2750       __ mov_metadata(rscratch1, exact_klass->constant_encoding());
2751       __ eor(rscratch1, tmp, rscratch1);
2752       __ cbz(rscratch1, ok);
2753       __ stop("exact klass and actual klass differ");
2754       __ bind(ok);
2755     }
2756 #endif
2757     if (!no_conflict) {
2758       if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
2759         if (exact_klass != NULL) {
2760           __ mov_metadata(tmp, exact_klass->constant_encoding());
2761         } else {
2762           __ load_klass(tmp, tmp);
2763         }
2764 
2765         __ ldr(rscratch2, mdo_addr);
2766         __ eor(tmp, tmp, rscratch2);
2767         __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2768         // klass seen before, nothing to do. The unknown bit may have been
2769         // set already but no need to check.
2770         __ cbz(rscratch1, next);
2771 
2772         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2773 
2774         if (TypeEntries::is_type_none(current_klass)) {
2775           __ cbz(rscratch2, none);
2776           __ cmp(rscratch2, (u1)TypeEntries::null_seen);
2777           __ br(Assembler::EQ, none);
2778           // There is a chance that the checks above (re-reading profiling
2779           // data from memory) fail if another thread has just set the
2780           // profiling to this obj's klass
2781           __ dmb(Assembler::ISHLD);
2782           __ ldr(rscratch2, mdo_addr);
2783           __ eor(tmp, tmp, rscratch2);
2784           __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2785           __ cbz(rscratch1, next);
2786         }
2787       } else {
2788         assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
2789                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
2790 
2791         __ ldr(tmp, mdo_addr);
2792         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2793       }
2794 
2795       // different than before. Cannot keep accurate profile.
2796       __ ldr(rscratch2, mdo_addr);
2797       __ orr(rscratch2, rscratch2, TypeEntries::type_unknown);
2798       __ str(rscratch2, mdo_addr);
2799 
2800       if (TypeEntries::is_type_none(current_klass)) {
2801         __ b(next);
2802 
2803         __ bind(none);
2804         // first time here. Set profile type.
2805         __ str(tmp, mdo_addr);
2806       }
2807     } else {
2808       // There's a single possible klass at this profile point
2809       assert(exact_klass != NULL, "should be");
2810       if (TypeEntries::is_type_none(current_klass)) {
2811         __ mov_metadata(tmp, exact_klass->constant_encoding());
2812         __ ldr(rscratch2, mdo_addr);
2813         __ eor(tmp, tmp, rscratch2);
2814         __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2815         __ cbz(rscratch1, next);
2816 #ifdef ASSERT
2817         {
2818           Label ok;
2819           __ ldr(rscratch1, mdo_addr);
2820           __ cbz(rscratch1, ok);
2821           __ cmp(rscratch1, (u1)TypeEntries::null_seen);
2822           __ br(Assembler::EQ, ok);
2823           // may have been set by another thread
2824           __ dmb(Assembler::ISHLD);
2825           __ mov_metadata(rscratch1, exact_klass->constant_encoding());
2826           __ ldr(rscratch2, mdo_addr);
2827           __ eor(rscratch2, rscratch1, rscratch2);
2828           __ andr(rscratch2, rscratch2, TypeEntries::type_mask);
2829           __ cbz(rscratch2, ok);
2830 
2831           __ stop("unexpected profiling mismatch");
2832           __ bind(ok);
2833         }
2834 #endif
2835         // first time here. Set profile type.
2836         __ str(tmp, mdo_addr);
2837       } else {
2838         assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
2839                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2840 
2841         __ ldr(tmp, mdo_addr);
2842         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2843 
2844         __ orr(tmp, tmp, TypeEntries::type_unknown);
2845         __ str(tmp, mdo_addr);
2846         // FIXME: Write barrier needed here?
2847       }
2848     }
2849 
2850     __ bind(next);
2851   }
2852   COMMENT("} emit_profile_type");
2853 }
2854 
2855 
2856 void LIR_Assembler::align_backward_branch_target() {
2857 }
2858 
2859 
2860 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2861   // tmp must be unused
2862   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2863 
2864   if (left->is_single_cpu()) {
2865     assert(dest->is_single_cpu(), "expect single result reg");
2866     __ negw(dest->as_register(), left->as_register());
2867   } else if (left->is_double_cpu()) {
2868     assert(dest->is_double_cpu(), "expect double result reg");
2869     __ neg(dest->as_register_lo(), left->as_register_lo());
2870   } else if (left->is_single_fpu()) {
2871     assert(dest->is_single_fpu(), "expect single float result reg");
2872     __ fnegs(dest->as_float_reg(), left->as_float_reg());
2873   } else {
2874     assert(left->is_double_fpu(), "expect double float operand reg");
2875     assert(dest->is_double_fpu(), "expect double float result reg");
2876     __ fnegd(dest->as_double_reg(), left->as_double_reg());
2877   }
2878 }
2879 
2880 
2881 void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
2882   if (patch_code != lir_patch_none) {
2883     deoptimize_trap(info);
2884     return;
2885   }
2886 
2887   __ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr()));
2888 }
2889 
2890 
2891 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
2892   assert(!tmp->is_valid(), "don't need temporary");
2893 
2894   CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb != NULL) {
    __ far_call(RuntimeAddress(dest));
  } else {
    __ mov(rscratch1, RuntimeAddress(dest));
    __ blr(rscratch1);
  }

  if (info != NULL) {
    add_call_info_here(info);
  }
}

void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
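  // Volatile accesses get their ordering from the membar LIR ops that
  // surround them, so a plain move suffices here.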
  if (dest->is_address() || src->is_address()) {
    move_op(src, dest, type, lir_patch_none, info,
            /*pop_fpu_stack*/false, /*unaligned*/false, /*wide*/false);
  } else {
    ShouldNotReachHere();
  }
}

#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    Assembler::Condition acond = Assembler::AL;
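    // Map the LIR condition to an AArch64 condition code; the
    // below/above variants use the unsigned condition codes LS and HS.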
    switch (op->condition()) {
      case lir_cond_equal:        acond = Assembler::EQ;  break;
      case lir_cond_notEqual:     acond = Assembler::NE;  break;
      case lir_cond_less:         acond = Assembler::LT;  break;
      case lir_cond_lessEqual:    acond = Assembler::LE;  break;
      case lir_cond_greaterEqual: acond = Assembler::GE;  break;
      case lir_cond_greater:      acond = Assembler::GT;  break;
      case lir_cond_belowEqual:   acond = Assembler::LS;  break;
      case lir_cond_aboveEqual:   acond = Assembler::HS;  break;
      default:                    ShouldNotReachHere();
    }
    __ br(acond, ok);
  }
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif

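// These entry points map C1's abstract memory barriers onto AArch64
// barriers via MacroAssembler::membar: acquire is LoadLoad|LoadStore,
// release is LoadStore|StoreStore, and AnyAny is a full two-way fence.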
void LIR_Assembler::membar() {
  COMMENT("membar");
  __ membar(MacroAssembler::AnyAny);
}

void LIR_Assembler::membar_acquire() {
  __ membar(Assembler::LoadLoad|Assembler::LoadStore);
}

void LIR_Assembler::membar_release() {
  __ membar(Assembler::LoadStore|Assembler::StoreStore);
}

void LIR_Assembler::membar_loadload() {
  __ membar(Assembler::LoadLoad);
}

void LIR_Assembler::membar_storestore() {
  __ membar(MacroAssembler::StoreStore);
}

void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }

void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }

void LIR_Assembler::on_spin_wait() {
  __ spin_wait();
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  __ mov(result_reg->as_register(), rthread);
}


void LIR_Assembler::peephole(LIR_List* lir) {
#if 0
  if (tableswitch_count >= max_tableswitches)
    return;

  /*
    This finite-state automaton recognizes sequences of compare-and-
    branch instructions.  We will turn them into a tableswitch.  You
    could argue that C1 really shouldn't be doing this sort of
    optimization, but without it the code is really horrible.
  */
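  /*
    For example, a sufficiently long run of the form (LIR sketch, not
    verbatim):

      cmp  reg, 0    branch_eq L0
      cmp  reg, 1    branch_eq L1
      cmp  reg, 2    branch_eq L2
      ...

    tests consecutive integer keys against the same register and can be
    collapsed into a single tableswitch dispatching on reg.
  */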

  enum { start_s, cmp1_s, beq_s, cmp_s } state;
  int first_key, last_key = min_jint;
  int next_key = 0;
  int start_insn = -1;
  int last_insn = -1;
  Register reg = noreg;
  LIR_Opr reg_opr;
  state = start_s;

  LIR_OpList* inst = lir->instructions_list();
  for (int i = 0; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);
    switch (state) {
    case start_s:
      first_key = -1;
      start_insn = i;
      switch (op->code()) {
      case lir_cmp:
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        if (opr1->is_cpu_register() && opr1->is_single_cpu()
            && opr2->is_constant()
            && opr2->type() == T_INT) {
          reg_opr = opr1;
          reg = opr1->as_register();
          first_key = opr2->as_constant_ptr()->as_jint();
          next_key = first_key + 1;
          state = cmp_s;
          goto next_state;
        }
        break;
      }
      break;
    case cmp_s:
      switch (op->code()) {
      case lir_branch:
        if (op->as_OpBranch()->cond() == lir_cond_equal) {
          state = beq_s;
          last_insn = i;
          goto next_state;
        }
      }
      state = start_s;
      break;
    case beq_s:
      switch (op->code()) {
      case lir_cmp: {
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        if (opr1->is_cpu_register() && opr1->is_single_cpu()
            && opr1->as_register() == reg
            && opr2->is_constant()
            && opr2->type() == T_INT
            && opr2->as_constant_ptr()->as_jint() == next_key) {
          last_key = next_key;
          next_key++;
          state = cmp_s;
          goto next_state;
        }
      }
      }
      last_key = next_key;
      state = start_s;
      break;
    default:
      assert(false, "impossible state");
    }
    if (state == start_s) {
      if (first_key < last_key - 5L && reg != noreg) {
        {
          // printf("found run register %d starting at insn %d low value %d high value %d\n",
          //        reg->encoding(),
          //        start_insn, first_key, last_key);
          //   for (int i = 0; i < inst->length(); i++) {
          //     inst->at(i)->print();
          //     tty->print("\n");
          //   }
          //   tty->print("\n");
        }

        struct tableswitch* sw = &switches[tableswitch_count];
        sw->_insn_index = start_insn;
        sw->_first_key = first_key;
        sw->_last_key = last_key;
        sw->_reg = reg;
        inst->insert_before(last_insn + 1, new LIR_OpLabel(&sw->_after));
        {
          // Insert the new table of branches
          int offset = last_insn;
          for (int n = first_key; n < last_key; n++) {
            inst->insert_before(last_insn + 1,
                                new LIR_OpBranch(lir_cond_always, T_ILLEGAL,
                                                 inst->at(offset)->as_OpBranch()->label()));
            offset -= 2;
            i++;
          }
        }
        // Delete all the old compare-and-branch instructions
        for (int n = first_key; n < last_key; n++) {
          inst->remove_at(start_insn);
          inst->remove_at(start_insn);
        }
        // Insert the tableswitch instruction
        inst->insert_before(start_insn,
                            new LIR_Op2(lir_cmp, lir_cond_always,
                                        LIR_OprFact::intConst(tableswitch_count),
                                        reg_opr));
        inst->insert_before(start_insn + 1, new LIR_OpLabel(&sw->_branches));
        tableswitch_count++;
      }
      reg = noreg;
      last_key = min_jint;
    }
  next_state:
    ;
  }
#endif
}

void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
  Address addr = as_Address(src->as_address_ptr());
  BasicType type = src->type();
  bool is_oop = is_reference_type(type);

  void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
  void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);

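  // Choose word or doubleword atomic helpers: T_INT and compressed
  // oops are 32 bits wide; T_LONG and uncompressed oops are 64 bits.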
  switch (type) {
  case T_INT:
    xchg = &MacroAssembler::atomic_xchgalw;
    add = &MacroAssembler::atomic_addalw;
    break;
  case T_LONG:
    xchg = &MacroAssembler::atomic_xchgal;
    add = &MacroAssembler::atomic_addal;
    break;
  case T_OBJECT:
  case T_ARRAY:
    if (UseCompressedOops) {
      xchg = &MacroAssembler::atomic_xchgalw;
      add = &MacroAssembler::atomic_addalw;
    } else {
      xchg = &MacroAssembler::atomic_xchgal;
      add = &MacroAssembler::atomic_addal;
    }
    break;
  default:
    ShouldNotReachHere();
    xchg = &MacroAssembler::atomic_xchgal;
    add = &MacroAssembler::atomic_addal; // unreachable
  }

  switch (code) {
  case lir_xadd:
    {
      RegisterOrConstant inc;
      Register tmp = as_reg(tmp_op);
      Register dst = as_reg(dest);
      if (data->is_constant()) {
        inc = RegisterOrConstant(as_long(data));
        assert_different_registers(dst, addr.base(), tmp,
                                   rscratch1, rscratch2);
      } else {
        inc = RegisterOrConstant(as_reg(data));
        assert_different_registers(inc.as_register(), dst, addr.base(), tmp,
                                   rscratch1, rscratch2);
      }
      __ lea(tmp, addr);
      (_masm->*add)(dst, inc, tmp);
      break;
    }
  case lir_xchg:
    {
      Register tmp = tmp_op->as_register();
      Register obj = as_reg(data);
      Register dst = as_reg(dest);
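      // With compressed oops the exchange is performed on the narrow
      // value: compress the new value here and decompress the old value
      // returned in dst below.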
      if (is_oop && UseCompressedOops) {
        __ encode_heap_oop(rscratch2, obj);
        obj = rscratch2;
      }
      assert_different_registers(obj, addr.base(), tmp, rscratch1, dst);
      __ lea(tmp, addr);
      (_masm->*xchg)(dst, obj, tmp);
      if (is_oop && UseCompressedOops) {
        __ decode_heap_oop(dst);
      }
    }
    break;
  default:
    ShouldNotReachHere();
  }
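  // The *al helpers already have acquire/release semantics; the trailing
  // full barrier additionally prevents the atomic update from being
  // reordered with subsequent memory accesses.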
  __ membar(__ AnyAny);
}

#undef __