/*
 * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.inline.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "code/compiledIC.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_aarch64.inline.hpp"


#ifndef PRODUCT
#define COMMENT(x)   do { __ block_comment(x); } while (0)
#else
#define COMMENT(x)
#endif

NEEDS_CLEANUP // remove these definitions?
const Register SYNC_header = r0;   // synchronization header
const Register SHIFT_count = r0;   // where count for shift operations must be

#define __ _masm->

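// If one of the temps aliases the register that must be preserved, redirect
// that temp to 'extra'; the asserts check that the caller passed
// pairwise-distinct registers to begin with.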
static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2);
}


static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2,
                                       Register &tmp3) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp2 = extra;
  } else if (tmp3 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp3 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2, tmp3);
}


bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}

//--------------fpu register translations-----------------------


address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr == nullptr) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}


address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr == nullptr) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

address LIR_Assembler::int_constant(jlong n) {
  address const_addr = __ long_constant(n);
  if (const_addr == nullptr) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

void LIR_Assembler::breakpoint() { Unimplemented(); }

void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }

void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }

bool LIR_Assembler::is_literal_address(LIR_Address* addr) { Unimplemented(); return false; }
//-------------------------------------------

static Register as_reg(LIR_Opr op) {
  return op->is_double_cpu() ? op->as_register_lo() : op->as_register();
}

static jlong as_long(LIR_Opr data) {
  jlong result;
  switch (data->type()) {
  case T_INT:
    result = (data->as_jint());
    break;
  case T_LONG:
    result = (data->as_jlong());
    break;
  default:
    ShouldNotReachHere();
    result = 0;  // unreachable
  }
  return result;
}

Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  Register base = addr->base()->as_pointer_register();
  LIR_Opr opr = addr->index();
  if (opr->is_cpu_register()) {
    Register index;
    if (opr->is_single_cpu())
      index = opr->as_register();
    else
      index = opr->as_register_lo();
    assert(addr->disp() == 0, "must be");
    switch(opr->type()) {
      case T_INT:
        return Address(base, index, Address::sxtw(addr->scale()));
      case T_LONG:
        return Address(base, index, Address::lsl(addr->scale()));
      default:
        ShouldNotReachHere();
      }
  } else {
    assert(addr->scale() == 0,
           "expected for immediate operand, was: %d", addr->scale());
    ptrdiff_t offset = ptrdiff_t(addr->disp());
    // NOTE: Does not handle any 16 byte vector access.
    const uint type_size = type2aelembytes(addr->type(), true);
    return __ legitimize_address(Address(base, offset), type_size, tmp);
  }
  return Address();
}

Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  ShouldNotReachHere();
  return Address();
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}

Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr, rscratch1);  // Ouch
  // FIXME: This needs to be much more clever.  See x86.
}

// Ensure a valid Address (base + offset) to a stack-slot. If stack access is
// not encodable as a base + (immediate) offset, generate an explicit address
// calculation to hold the address in a temporary register.
Address LIR_Assembler::stack_slot_address(int index, uint size, Register tmp, int adjust) {
  precond(size == 4 || size == 8);
  Address addr = frame_map()->address_for_slot(index, adjust);
  precond(addr.getMode() == Address::base_plus_offset);
  precond(addr.base() == sp);
  precond(addr.offset() > 0);
  uint mask = size - 1;
  assert((addr.offset() & mask) == 0, "scaled offsets only");
  return __ legitimize_address(addr, size, tmp);
}

void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // We jump here if OSR happens with the interpreter state set up to
  // continue at the beginning of the loop that triggered OSR; in
  // particular, we have the following registers set up:
  //
  // r2: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame, so the first slot in
  // the local array of the osr buffer is the last local from the interpreter
  // and the last slot is local[0] (the receiver) from the interpreter.
  //
  // Similarly with locks: the first lock slot in the osr buffer is the nth
  // lock from the interpreter frame, and the nth lock slot in the osr buffer
  // is the 0th lock of the interpreter frame (the method lock, if this is a
  // synchronized method).
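  //
  // For example (hypothetical values), with max_locals == 2 and
  // number_of_locks == 1 the buffer reads, in word indices:
  //   [0] local[1]   [1] local[0] (receiver)
  //   [2] lock[0]    [3] object[0]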

  // Initialize monitors in the compiled activation.
  //   r2: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ldr(rscratch1, __ form_address(rscratch1, OSR_buf, slot_offset + 1*BytesPerWord, 0));
        __ cbnz(rscratch1, L);
        __ stop("locked object is null");
        __ bind(L);
      }
#endif
      __ ldr(r19, __ form_address(rscratch1, OSR_buf, slot_offset, 0));
      __ ldr(r20, __ form_address(rscratch1, OSR_buf, slot_offset + BytesPerWord, 0));
      __ str(r19, frame_map()->address_for_monitor_lock(i));
      __ str(r20, frame_map()->address_for_monitor_object(i));
    }
  }
}


// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
  return __ ic_check(CodeEntryAlignment);
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  Label L_skip_barrier;

  __ mov_metadata(rscratch2, method->holder()->constant_encoding());
  __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier /*L_fast_path*/);
  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
  __ bind(L_skip_barrier);
}

void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == nullptr) {
    __ mov(reg, zr);
  } else {
    __ movoop(reg, o);
  }
}

void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
  address target = nullptr;
  relocInfo::relocType reloc_type = relocInfo::none;

  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
    target = Runtime1::entry_for(StubId::c1_access_field_patching_id);
    reloc_type = relocInfo::section_word_type;
    break;
  case PatchingStub::load_klass_id:
    target = Runtime1::entry_for(StubId::c1_load_klass_patching_id);
    reloc_type = relocInfo::metadata_type;
    break;
  case PatchingStub::load_mirror_id:
    target = Runtime1::entry_for(StubId::c1_load_mirror_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  case PatchingStub::load_appendix_id:
    target = Runtime1::entry_for(StubId::c1_load_appendix_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  default: ShouldNotReachHere();
  }

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  deoptimize_trap(info);
}


// This specifies the sp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!

  return in_bytes(frame_map()->framesize_in_bytes());
}


int LIR_Assembler::emit_exception_handler() {
  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in r0 and r3
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(r0);

  // search an exception handler (r0: exception oop, r3: throwing pc)
  __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_handle_exception_from_callee_id)));
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(r0);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(r19, r0);  // Preserve the exception
  }

  // Perform needed unlocking
  MonitorExitStub* stub = nullptr;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::r0_opr);
    stub = new MonitorExitStub(FrameMap::r0_opr, 0);
    __ unlock_object(r5, r4, r0, r6, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ mov(c_rarg0, rthread);
    __ mov_metadata(c_rarg1, method()->constant_encoding());
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(r0, r19);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ block_comment("remove_frame and dispatch to the unwind handler");
  __ remove_frame(initial_frame_size_in_bytes());
  __ far_jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != nullptr) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // generate code for deopt handler
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

  Label start;
  __ bind(start);

  __ far_call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

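  // The handler entry point is the branch below, which loops back to the
  // call above; the call then leaves LR pointing at the handler entry, so
  // the unpack blob sees a return address inside this nmethod's handler.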
  int entry_offset = __ offset();
  __ b(start);

  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
         "out of bounds read in post-call NOP check");
  __ end_a_stub();

  return entry_offset;
}

void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
  _masm->code_section()->relocate(adr, relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != nullptr) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}

void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0");

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  code_stub->set_safepoint_offset(__ offset());
  __ relocate(relocInfo::poll_return_type);
  __ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
  __ ret(lr);
}

int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  guarantee(info != nullptr, "Shouldn't be null");
  __ get_polling_page(rscratch1, relocInfo::poll_type);
  add_debug_info_for_branch(info);  // This isn't just debug info:
                                    // it's the oop map
  __ read_polling_page(rscratch1, relocInfo::poll_type);
  return __ offset();
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg == r31_sp)
    from_reg = sp;
  if (to_reg == r31_sp)
    to_reg = sp;
  __ mov(to_reg, from_reg);
}

void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movw(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
      break;
    }

    case T_OBJECT: {
      if (patch_code == lir_patch_none) {
        jobject2reg(c->as_jobject(), dest->as_register());
      } else {
        jobject2reg_with_patching(dest->as_register(), info);
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
        __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
      } else {
        __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
        __ ldrs(dest->as_float_reg(), Address(rscratch1));
      }
      break;
    }

    case T_DOUBLE: {
      if (__ operand_valid_for_float_immediate(c->as_jdouble())) {
        __ fmovd(dest->as_double_reg(), (c->as_jdouble()));
      } else {
        __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
        __ ldrd(dest->as_double_reg(), Address(rscratch1));
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
  case T_OBJECT:
    {
      if (! c->as_jobject())
        __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
      else {
        const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, nullptr);
        reg2stack(FrameMap::rscratch1_opr, dest, c->type());
      }
    }
    break;
  case T_ADDRESS:
    {
      const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, nullptr);
      reg2stack(FrameMap::rscratch1_opr, dest, c->type());
    }
    break;
  case T_INT:
  case T_FLOAT:
    {
      if (c->as_jint_bits() == 0)
        __ strw(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
      else {
        __ movw(rscratch1, c->as_jint_bits());
        __ strw(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix()));
      }
    }
    break;
  case T_LONG:
  case T_DOUBLE:
    {
      if (c->as_jlong_bits() == 0)
        __ str(zr, frame_map()->address_for_slot(dest->double_stack_ix(),
                                                 lo_word_offset_in_bytes));
      else {
        __ mov(rscratch1, (intptr_t)c->as_jlong_bits());
        __ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(),
                                                        lo_word_offset_in_bytes));
      }
    }
    break;
  default:
    ShouldNotReachHere();
  }
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* to_addr = dest->as_address_ptr();

  void (Assembler::* insn)(Register Rt, const Address &adr);

  switch (type) {
  case T_ADDRESS:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_LONG:
    assert(c->as_jlong() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_INT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strw;
    break;
  case T_OBJECT:
  case T_ARRAY:
    assert(c->as_jobject() == nullptr, "should be");
    if (UseCompressedOops && !wide) {
      insn = &Assembler::strw;
    } else {
      insn = &Assembler::str;
    }
    break;
  case T_CHAR:
  case T_SHORT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strh;
    break;
  case T_BOOLEAN:
  case T_BYTE:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strb;
    break;
  default:
    ShouldNotReachHere();
    insn = &Assembler::str;  // unreachable
  }

  if (info) add_debug_info_for_null_check_here(info);
  (_masm->*insn)(zr, as_Address(to_addr, rscratch1));
}

void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
    if (is_reference_type(src->type())) {
      // Surprising, but we can see a move of a long to T_OBJECT
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);

  } else if (dest->is_single_fpu()) {
    __ fmovs(dest->as_float_reg(), src->as_float_reg());

  } else if (dest->is_double_fpu()) {
    __ fmovd(dest->as_double_reg(), src->as_double_reg());

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  precond(src->is_register() && dest->is_stack());

  uint const c_sz32 = sizeof(uint32_t);
  uint const c_sz64 = sizeof(uint64_t);

  if (src->is_single_cpu()) {
    int index = dest->single_stack_ix();
    if (is_reference_type(type)) {
      __ str(src->as_register(), stack_slot_address(index, c_sz64, rscratch1));
      __ verify_oop(src->as_register());
    } else if (type == T_METADATA || type == T_DOUBLE || type == T_ADDRESS) {
      __ str(src->as_register(), stack_slot_address(index, c_sz64, rscratch1));
    } else {
      __ strw(src->as_register(), stack_slot_address(index, c_sz32, rscratch1));
    }

  } else if (src->is_double_cpu()) {
    int index = dest->double_stack_ix();
    Address dest_addr_LO = stack_slot_address(index, c_sz64, rscratch1, lo_word_offset_in_bytes);
    __ str(src->as_register_lo(), dest_addr_LO);

  } else if (src->is_single_fpu()) {
    int index = dest->single_stack_ix();
    __ strs(src->as_float_reg(), stack_slot_address(index, c_sz32, rscratch1));

  } else if (src->is_double_fpu()) {
    int index = dest->double_stack_ix();
    __ strd(src->as_double_reg(), stack_slot_address(index, c_sz64, rscratch1));

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = nullptr;
  Register compressed_src = rscratch1;

  if (patch_code != lir_patch_none) {
    deoptimize_trap(info);
    return;
  }

  if (is_reference_type(type)) {
    __ verify_oop(src->as_register());

    if (UseCompressedOops && !wide) {
      __ encode_heap_oop(compressed_src, src->as_register());
    } else {
      compressed_src = src->as_register();
    }
  }

  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      __ strs(src->as_float_reg(), as_Address(to_addr));
      break;
    }

    case T_DOUBLE: {
      __ strd(src->as_double_reg(), as_Address(to_addr));
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ strw(compressed_src, as_Address(to_addr, rscratch2));
      } else {
        __ str(compressed_src, as_Address(to_addr));
      }
      break;
    case T_METADATA:
      // We get here to store a method pointer to the stack to pass to
      // a dtrace runtime call. This can't work on 64 bit with
      // compressed klass ptrs: T_METADATA can be a compressed klass
      // ptr or a 64 bit method pointer.
      ShouldNotReachHere();
      __ str(src->as_register(), as_Address(to_addr));
      break;
    case T_ADDRESS:
      __ str(src->as_register(), as_Address(to_addr));
      break;
    case T_INT:
      __ strw(src->as_register(), as_Address(to_addr));
      break;

    case T_LONG: {
      __ str(src->as_register_lo(), as_Address_lo(to_addr));
      break;
    }

    case T_BYTE:    // fall through
    case T_BOOLEAN: {
      __ strb(src->as_register(), as_Address(to_addr));
      break;
    }

    case T_CHAR:    // fall through
    case T_SHORT:
      __ strh(src->as_register(), as_Address(to_addr));
      break;

    default:
      ShouldNotReachHere();
  }
  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  precond(src->is_stack() && dest->is_register());

  uint const c_sz32 = sizeof(uint32_t);
  uint const c_sz64 = sizeof(uint64_t);

  if (dest->is_single_cpu()) {
    int index = src->single_stack_ix();
    if (is_reference_type(type)) {
      __ ldr(dest->as_register(), stack_slot_address(index, c_sz64, rscratch1));
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ ldr(dest->as_register(), stack_slot_address(index, c_sz64, rscratch1));
    } else {
      __ ldrw(dest->as_register(), stack_slot_address(index, c_sz32, rscratch1));
    }

  } else if (dest->is_double_cpu()) {
    int index = src->double_stack_ix();
    Address src_addr_LO = stack_slot_address(index, c_sz64, rscratch1, lo_word_offset_in_bytes);
    __ ldr(dest->as_register_lo(), src_addr_LO);

  } else if (dest->is_single_fpu()) {
    int index = src->single_stack_ix();
    __ ldrs(dest->as_float_reg(), stack_slot_address(index, c_sz32, rscratch1));

  } else if (dest->is_double_fpu()) {
    int index = src->double_stack_ix();
    __ ldrd(dest->as_double_reg(), stack_slot_address(index, c_sz64, rscratch1));

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  address target = nullptr;
  relocInfo::relocType reloc_type = relocInfo::none;

  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
    target = Runtime1::entry_for(StubId::c1_access_field_patching_id);
    reloc_type = relocInfo::section_word_type;
    break;
  case PatchingStub::load_klass_id:
    target = Runtime1::entry_for(StubId::c1_load_klass_patching_id);
    reloc_type = relocInfo::metadata_type;
    break;
  case PatchingStub::load_mirror_id:
    target = Runtime1::entry_for(StubId::c1_load_mirror_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  case PatchingStub::load_appendix_id:
    target = Runtime1::entry_for(StubId::c1_load_appendix_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  default: ShouldNotReachHere();
  }

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}

void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {

  LIR_Opr temp;
  if (type == T_LONG || type == T_DOUBLE)
    temp = FrameMap::rscratch1_long_opr;
  else
    temp = FrameMap::rscratch1_opr;

  stack2reg(src, temp, src->type());
  reg2stack(temp, dest, dest->type());
}


void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
  LIR_Address* addr = src->as_address_ptr();
  LIR_Address* from_addr = src->as_address_ptr();

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  if (patch_code != lir_patch_none) {
    deoptimize_trap(info);
    return;
  }

  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }
  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      __ ldrs(dest->as_float_reg(), as_Address(from_addr));
      break;
    }

    case T_DOUBLE: {
      __ ldrd(dest->as_double_reg(), as_Address(from_addr));
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ ldrw(dest->as_register(), as_Address(from_addr));
      } else {
        __ ldr(dest->as_register(), as_Address(from_addr));
      }
      break;
    case T_METADATA:
      // We get here to load a method pointer from the stack for
      // a dtrace runtime call. This can't work on 64 bit with
      // compressed klass ptrs: T_METADATA can be a compressed klass
      // ptr or a 64 bit method pointer.
      ShouldNotReachHere();
      __ ldr(dest->as_register(), as_Address(from_addr));
      break;
    case T_ADDRESS:
      __ ldr(dest->as_register(), as_Address(from_addr));
      break;
    case T_INT:
      __ ldrw(dest->as_register(), as_Address(from_addr));
      break;

    case T_LONG: {
      __ ldr(dest->as_register_lo(), as_Address_lo(from_addr));
      break;
    }

    case T_BYTE:
      __ ldrsb(dest->as_register(), as_Address(from_addr));
      break;
    case T_BOOLEAN: {
      __ ldrb(dest->as_register(), as_Address(from_addr));
      break;
    }

    case T_CHAR:
      __ ldrh(dest->as_register(), as_Address(from_addr));
      break;
    case T_SHORT:
      __ ldrsh(dest->as_register(), as_Address(from_addr));
      break;

    default:
      ShouldNotReachHere();
  }

  if (is_reference_type(type)) {
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }

    __ verify_oop(dest->as_register());
  }
}


int LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  return exact_log2(elem_size);
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
  case lir_idiv:
  case lir_irem:
    arithmetic_idiv(op->code(),
                    op->in_opr1(),
                    op->in_opr2(),
                    op->in_opr3(),
                    op->result_opr(),
                    op->info());
    break;
  case lir_fmad:
    __ fmaddd(op->result_opr()->as_double_reg(),
              op->in_opr1()->as_double_reg(),
              op->in_opr2()->as_double_reg(),
              op->in_opr3()->as_double_reg());
    break;
  case lir_fmaf:
    __ fmadds(op->result_opr()->as_float_reg(),
              op->in_opr1()->as_float_reg(),
              op->in_opr2()->as_float_reg(),
              op->in_opr3()->as_float_reg());
    break;
  default:      ShouldNotReachHere(); break;
  }
}

void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
  if (op->block() != nullptr)  _branch_target_blocks.append(op->block());
  if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != nullptr) add_debug_info_for_branch(op->info());
    __ b(*(op->label()));
  } else {
    Assembler::Condition acond;
    if (op->code() == lir_cond_float_branch) {
      bool is_unordered = (op->ublock() == op->block());
      // Assembler::EQ does not permit unordered branches, so we add
      // another branch here.  Likewise, Assembler::NE does not permit
      // ordered branches.
      if ((is_unordered && op->cond() == lir_cond_equal)
          || (!is_unordered && op->cond() == lir_cond_notEqual))
        __ br(Assembler::VS, *(op->ublock()->label()));
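      // (On AArch64, a floating-point compare sets the V flag exactly when
      // the operands are unordered, so VS is the "unordered" condition.)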
      switch(op->cond()) {
      case lir_cond_equal:        acond = Assembler::EQ; break;
      case lir_cond_notEqual:     acond = Assembler::NE; break;
      case lir_cond_less:         acond = (is_unordered ? Assembler::LT : Assembler::LO); break;
      case lir_cond_lessEqual:    acond = (is_unordered ? Assembler::LE : Assembler::LS); break;
      case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break;
      case lir_cond_greater:      acond = (is_unordered ? Assembler::HI : Assembler::GT); break;
      default:                    ShouldNotReachHere();
        acond = Assembler::EQ;  // unreachable
      }
    } else {
      switch (op->cond()) {
        case lir_cond_equal:        acond = Assembler::EQ; break;
        case lir_cond_notEqual:     acond = Assembler::NE; break;
        case lir_cond_less:         acond = Assembler::LT; break;
        case lir_cond_lessEqual:    acond = Assembler::LE; break;
        case lir_cond_greaterEqual: acond = Assembler::GE; break;
        case lir_cond_greater:      acond = Assembler::GT; break;
        case lir_cond_belowEqual:   acond = Assembler::LS; break;
        case lir_cond_aboveEqual:   acond = Assembler::HS; break;
        default:                    ShouldNotReachHere();
          acond = Assembler::EQ;  // unreachable
      }
    }
    __ br(acond, *(op->label()));
  }
}



void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2f:
      {
        __ scvtfws(dest->as_float_reg(), src->as_register());
        break;
      }
    case Bytecodes::_i2d:
      {
        __ scvtfwd(dest->as_double_reg(), src->as_register());
        break;
      }
    case Bytecodes::_l2d:
      {
        __ scvtfd(dest->as_double_reg(), src->as_register_lo());
        break;
      }
    case Bytecodes::_l2f:
      {
        __ scvtfs(dest->as_float_reg(), src->as_register_lo());
        break;
      }
    case Bytecodes::_f2d:
      {
        __ fcvts(dest->as_double_reg(), src->as_float_reg());
        break;
      }
    case Bytecodes::_d2f:
      {
        __ fcvtd(dest->as_float_reg(), src->as_double_reg());
        break;
      }
    case Bytecodes::_i2c:
      {
        __ ubfx(dest->as_register(), src->as_register(), 0, 16);
        break;
      }
    case Bytecodes::_i2l:
      {
        __ sxtw(dest->as_register_lo(), src->as_register());
        break;
      }
    case Bytecodes::_i2s:
      {
        __ sxth(dest->as_register(), src->as_register());
        break;
      }
    case Bytecodes::_i2b:
      {
        __ sxtb(dest->as_register(), src->as_register());
        break;
      }
    case Bytecodes::_l2i:
      {
        _masm->block_comment("FIXME: This could be a no-op");
        __ uxtw(dest->as_register(), src->as_register_lo());
        break;
      }
    case Bytecodes::_d2l:
      {
        __ fcvtzd(dest->as_register_lo(), src->as_double_reg());
        break;
      }
    case Bytecodes::_f2i:
      {
        __ fcvtzsw(dest->as_register(), src->as_float_reg());
        break;
      }
    case Bytecodes::_f2l:
      {
        __ fcvtzs(dest->as_register_lo(), src->as_float_reg());
        break;
      }
    case Bytecodes::_d2i:
      {
        __ fcvtzdw(dest->as_register(), src->as_double_reg());
        break;
      }
    default: ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
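    // Acquire-load the class's init state so that observing
    // fully_initialized also makes the initialized static state of the
    // class visible to this thread.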
    __ lea(rscratch1, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
    __ ldarb(rscratch1, rscratch1);
    __ cmpw(rscratch1, InstanceKlass::fully_initialized);
    add_debug_info_for_null_check_here(op->stub()->info());
    __ br(Assembler::NE, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();
  __ uxtw(len, len);
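  // The array length arrives as a 32-bit value; zero-extend it so the
  // 64-bit size computations below see a clean register.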

  if (UseSlowPath ||
      (!UseFastNewObjectArray && is_reference_type(op->type())) ||
      (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
    __ b(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::base_offset_in_bytes(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry(),
                      op->zero_array());
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md,
                                        ciProfileData *data, Register recv) {

  int mdp_offset = md->byte_offset_of_slot(data, in_ByteSize(0));
  __ profile_receiver_type(recv, mdo, mdp_offset);
}

void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // We always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;

  // check if it needs to be profiled
  ciMethodData* md;
  ciProfileData* data;

  const bool should_profile = op->should_profile();

  if (should_profile) {
    ciMethod* method = op->profiled_method();
    assert(method != nullptr, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != nullptr, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != nullptr, "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  Label* success_target = success;
  Label* failure_target = failure;

  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedClassPointers) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  if (should_profile) {
    Register mdo  = klass_RInfo;
    __ mov_metadata(mdo, md->constant_encoding());
    Label not_null;
    __ cbnz(obj, not_null);
    // Object is null; update MDO and exit
    Address data_addr
      = __ form_address(rscratch2, mdo,
                        md->byte_offset_of_slot(data, DataLayout::flags_offset()),
                        0);
    __ ldrb(rscratch1, data_addr);
    __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
    __ strb(rscratch1, data_addr);
    __ b(*obj_is_null);
    __ bind(not_null);

    Register recv = k_RInfo;
    __ load_klass(recv, obj);
    type_profile_helper(mdo, md, data, recv);
  } else {
    __ cbz(obj, *obj_is_null);
  }

  if (!k->is_loaded()) {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
    __ mov_metadata(k_RInfo, k->constant_encoding());
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(rscratch1, obj);
    __ cmp(rscratch1, k_RInfo);

    __ br(Assembler::NE, *failure_target);
    // successful cast, fall through to profile or jump
  } else {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(klass_RInfo, obj);
    if (k->is_loaded()) {
      // See if we get an immediate positive hit
      __ ldr(rscratch1, Address(klass_RInfo, int64_t(k->super_check_offset())));
      __ cmp(k_RInfo, rscratch1);
      if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
        __ br(Assembler::NE, *failure_target);
        // successful cast, fall through to profile or jump
      } else {
        // See if we get an immediate positive hit
        __ br(Assembler::EQ, *success_target);
        // check for self
        __ cmp(klass_RInfo, k_RInfo);
        __ br(Assembler::EQ, *success_target);

        __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
        __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
        __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
        // result is a boolean
        __ cbzw(klass_RInfo, *failure_target);
        // successful cast, fall through to profile or jump
      }
    } else {
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
      __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
      __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
      // result is a boolean
      __ cbz(k_RInfo, *failure_target);
      // successful cast, fall through to profile or jump
    }
  }
  __ b(*success);
}


void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  const bool should_profile = op->should_profile();

  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();

    CodeStub* stub = op->stub();

    // check if it needs to be profiled
    ciMethodData* md;
    ciProfileData* data;

    if (should_profile) {
      ciMethod* method = op->profiled_method();
      assert(method != nullptr, "Should have method");
      int bci = op->profiled_bci();
      md = method->method_data_or_null();
      assert(md != nullptr, "Sanity");
      data = md->bci_to_data(bci);
      assert(data != nullptr, "need data for type check");
      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
    }
    Label done;
    Label* success_target = &done;
    Label* failure_target = stub->entry();

    if (should_profile) {
      Label not_null;
      Register mdo  = klass_RInfo;
      __ mov_metadata(mdo, md->constant_encoding());
      __ cbnz(value, not_null);
      // Object is null; update MDO and exit
      Address data_addr
        = __ form_address(rscratch2, mdo,
                          md->byte_offset_of_slot(data, DataLayout::flags_offset()), 0);
      __ ldrb(rscratch1, data_addr);
      __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
      __ strb(rscratch1, data_addr);
      __ b(done);
      __ bind(not_null);

      Register recv = k_RInfo;
      __ load_klass(recv, value);
      type_profile_helper(mdo, md, data, recv);
    } else {
      __ cbz(value, done);
    }

    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(k_RInfo, array);
    __ load_klass(klass_RInfo, value);

    // get instance klass (it's already uncompressed)
    __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
    __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
    __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
    // result is a boolean
    __ cbzw(k_RInfo, *failure_target);
    // fall through to the success case

    __ bind(done);
  } else if (code == lir_checkcast) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success;
    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
    __ bind(success);
    if (dst != obj) {
      __ mov(dst, obj);
    }
  } else if (code == lir_instanceof) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    emit_typecheck_helper(op, &success, &failure, &failure);
    __ bind(failure);
    __ mov(dst, zr);
    __ b(done);
    __ bind(success);
    __ mov(dst, 1);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}

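// Both helpers leave rscratch1 == 0 on success and == 1 on failure (cset
// on NE), and the trailing AnyAny barrier orders the CAS with respect to
// surrounding memory accesses.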
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}

void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}


void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  Register addr;
  if (op->addr()->is_register()) {
    addr = as_reg(op->addr());
  } else {
    assert(op->addr()->is_address(), "what else?");
    LIR_Address* addr_ptr = op->addr()->as_address_ptr();
    assert(addr_ptr->disp() == 0, "need 0 disp");
    assert(addr_ptr->index() == LIR_Opr::illegalOpr(), "need 0 index");
    addr = as_reg(addr_ptr->base());
  }
  Register newval = as_reg(op->new_value());
  Register cmpval = as_reg(op->cmp_value());

  if (op->code() == lir_cas_obj) {
    if (UseCompressedOops) {
      Register t1 = op->tmp1()->as_register();
      assert(op->tmp1()->is_valid(), "must be");
      __ encode_heap_oop(t1, cmpval);
      cmpval = t1;
      __ encode_heap_oop(rscratch2, newval);
      newval = rscratch2;
      casw(addr, newval, cmpval);
    } else {
      casl(addr, newval, cmpval);
    }
  } else if (op->code() == lir_cas_int) {
    casw(addr, newval, cmpval);
  } else {
    casl(addr, newval, cmpval);
  }
}


void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
                          LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
  assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on aarch64");

  Assembler::Condition acond, ncond;
  switch (condition) {
  case lir_cond_equal:        acond = Assembler::EQ; ncond = Assembler::NE; break;
  case lir_cond_notEqual:     acond = Assembler::NE; ncond = Assembler::EQ; break;
  case lir_cond_less:         acond = Assembler::LT; ncond = Assembler::GE; break;
  case lir_cond_lessEqual:    acond = Assembler::LE; ncond = Assembler::GT; break;
  case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;
  case lir_cond_greater:      acond = Assembler::GT; ncond = Assembler::LE; break;
  case lir_cond_belowEqual:
  case lir_cond_aboveEqual:
  default:                    ShouldNotReachHere();
    acond = Assembler::EQ; ncond = Assembler::NE;  // unreachable
  }

  assert(result->is_single_cpu() || result->is_double_cpu(),
         "expect single register for result");
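  // If both operands are the constants 0 and 1 the select collapses to a
  // single cset, which materializes 0 or 1 directly from the flags.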
  if (opr1->is_constant() && opr2->is_constant()
      && opr1->type() == T_INT && opr2->type() == T_INT) {
    jint val1 = opr1->as_jint();
    jint val2 = opr2->as_jint();
    if (val1 == 0 && val2 == 1) {
      __ cset(result->as_register(), ncond);
      return;
    } else if (val1 == 1 && val2 == 0) {
      __ cset(result->as_register(), acond);
      return;
    }
  }

  if (opr1->is_constant() && opr2->is_constant()
      && opr1->type() == T_LONG && opr2->type() == T_LONG) {
    jlong val1 = opr1->as_jlong();
    jlong val2 = opr2->as_jlong();
    if (val1 == 0 && val2 == 1) {
      __ cset(result->as_register_lo(), ncond);
      return;
    } else if (val1 == 1 && val2 == 0) {
      __ cset(result->as_register_lo(), acond);
      return;
    }
  }

  if (opr1->is_stack()) {
    stack2reg(opr1, FrameMap::rscratch1_opr, result->type());
    opr1 = FrameMap::rscratch1_opr;
  } else if (opr1->is_constant()) {
    LIR_Opr tmp
      = opr1->type() == T_LONG ? FrameMap::rscratch1_long_opr : FrameMap::rscratch1_opr;
    const2reg(opr1, tmp, lir_patch_none, nullptr);
    opr1 = tmp;
  }

  if (opr2->is_stack()) {
    stack2reg(opr2, FrameMap::rscratch2_opr, result->type());
    opr2 = FrameMap::rscratch2_opr;
  } else if (opr2->is_constant()) {
    LIR_Opr tmp
      = opr2->type() == T_LONG ? FrameMap::rscratch2_long_opr : FrameMap::rscratch2_opr;
    const2reg(opr2, tmp, lir_patch_none, nullptr);
    opr2 = tmp;
  }

  if (result->type() == T_LONG)
    __ csel(result->as_register_lo(), opr1->as_register_lo(), opr2->as_register_lo(), acond);
  else
    __ csel(result->as_register(), opr1->as_register(), opr2->as_register(), acond);
}

void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info) {
  assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

  if (left->is_single_cpu()) {
    Register lreg = left->as_register();
    Register dreg = as_reg(dest);

    if (right->is_single_cpu()) {
      // cpu register - cpu register

      assert(left->type() == T_INT && right->type() == T_INT && dest->type() == T_INT,
             "should be");
      Register rreg = right->as_register();
      switch (code) {
      case lir_add: __ addw (dest->as_register(), lreg, rreg); break;
      case lir_sub: __ subw (dest->as_register(), lreg, rreg); break;
      case lir_mul: __ mulw (dest->as_register(), lreg, rreg); break;
      default:      ShouldNotReachHere();
      }

    } else if (right->is_double_cpu()) {
      Register rreg = right->as_register_lo();
      // single_cpu + double_cpu: can happen with obj+long
      assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
      switch (code) {
      case lir_add: __ add(dreg, lreg, rreg); break;
      case lir_sub: __ sub(dreg, lreg, rreg); break;
      default: ShouldNotReachHere();
      }
    } else if (right->is_constant()) {
      // cpu register - constant
      jlong c;

      // FIXME.  This is fugly: we really need to factor all this logic.
      switch(right->type()) {
      case T_LONG:
        c = right->as_constant_ptr()->as_jlong();
        break;
      case T_INT:
      case T_ADDRESS:
        c = right->as_constant_ptr()->as_jint();
        break;
      default:
        ShouldNotReachHere();
        c = 0;  // unreachable
        break;
      }

      assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
      if (c == 0 && dreg == lreg) {
1614         COMMENT("effective nop elided");
1615         return;
1616       }
1617       switch(left->type()) {
1618       case T_INT:
1619         switch (code) {
1620         case lir_add: __ addw(dreg, lreg, c); break;
1621         case lir_sub: __ subw(dreg, lreg, c); break;
1622         default: ShouldNotReachHere();
1623         }
1624         break;
1625       case T_OBJECT:
1626       case T_ADDRESS:
1627         switch (code) {
1628         case lir_add: __ add(dreg, lreg, c); break;
1629         case lir_sub: __ sub(dreg, lreg, c); break;
1630         default: ShouldNotReachHere();
1631         }
1632         break;
1633       default:
1634         ShouldNotReachHere();
1635       }
1636     } else {
1637       ShouldNotReachHere();
1638     }
1639 
1640   } else if (left->is_double_cpu()) {
1641     Register lreg_lo = left->as_register_lo();
1642 
1643     if (right->is_double_cpu()) {
1644       // cpu register - cpu register
1645       Register rreg_lo = right->as_register_lo();
1646       switch (code) {
1647       case lir_add: __ add (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1648       case lir_sub: __ sub (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1649       case lir_mul: __ mul (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1650       case lir_div: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, false, rscratch1); break;
1651       case lir_rem: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, true, rscratch1); break;
1652       default:
1653         ShouldNotReachHere();
1654       }
1655 
1656     } else if (right->is_constant()) {
1657       jlong c = right->as_constant_ptr()->as_jlong();
1658       Register dreg = as_reg(dest);
1659       switch (code) {
1660         case lir_add:
1661         case lir_sub:
1662           if (c == 0 && dreg == lreg_lo) {
1663             COMMENT("effective nop elided");
1664             return;
1665           }
          if (code == lir_add) {
            __ add(dreg, lreg_lo, c);
          } else {
            __ sub(dreg, lreg_lo, c);
          }
1667           break;
1668         case lir_div:
1669           assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
1670           if (c == 1) {
1671             // move lreg_lo to dreg if divisor is 1
1672             __ mov(dreg, lreg_lo);
1673           } else {
1674             unsigned int shift = log2i_exact(c);
1675             // use rscratch1 as intermediate result register
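            // Signed division rounds toward zero, so bias a negative
            // dividend by (2^shift - 1) before the arithmetic shift:
            // asr(.., 63) is all ones iff lreg_lo is negative, and LSR by
            // (64 - shift) turns that into exactly the bias.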
1676             __ asr(rscratch1, lreg_lo, 63);
1677             __ add(rscratch1, lreg_lo, rscratch1, Assembler::LSR, 64 - shift);
1678             __ asr(dreg, rscratch1, shift);
1679           }
1680           break;
1681         case lir_rem:
1682           assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
1683           if (c == 1) {
1684             // move 0 to dreg if divisor is 1
1685             __ mov(dreg, zr);
1686           } else {
1687             // use rscratch1 as intermediate result register
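            // x % 2^shift rounding toward zero: negs sets the flags from
            // -x, so MI means x was positive. csneg then picks x & (c-1)
            // for positive x and -((-x) & (c-1)) otherwise, giving the
            // remainder the sign of the dividend.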
1688             __ negs(rscratch1, lreg_lo);
1689             __ andr(dreg, lreg_lo, c - 1);
1690             __ andr(rscratch1, rscratch1, c - 1);
1691             __ csneg(dreg, dreg, rscratch1, Assembler::MI);
1692           }
1693           break;
1694         default:
1695           ShouldNotReachHere();
1696       }
1697     } else {
1698       ShouldNotReachHere();
1699     }
1700   } else if (left->is_single_fpu()) {
    assert(right->is_single_fpu(), "right hand side of float arithmetic must be a float register");
1702     switch (code) {
1703     case lir_add: __ fadds (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1704     case lir_sub: __ fsubs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1705     case lir_mul: __ fmuls (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1706     case lir_div: __ fdivs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1707     default:
1708       ShouldNotReachHere();
1709     }
1710   } else if (left->is_double_fpu()) {
1711     if (right->is_double_fpu()) {
1712       // fpu register - fpu register
1713       switch (code) {
1714       case lir_add: __ faddd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1715       case lir_sub: __ fsubd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1716       case lir_mul: __ fmuld (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1717       case lir_div: __ fdivd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1718       default:
1719         ShouldNotReachHere();
1720       }
1721     } else {
1722       if (right->is_constant()) {
1723         ShouldNotReachHere();
1724       }
1725       ShouldNotReachHere();
1726     }
1727   } else if (left->is_single_stack() || left->is_address()) {
1728     assert(left == dest, "left and dest must be equal");
1729     ShouldNotReachHere();
1730   } else {
1731     ShouldNotReachHere();
1732   }
1733 }
1734 
1735 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
1736   switch(code) {
1737   case lir_abs : __ fabsd(dest->as_double_reg(), value->as_double_reg()); break;
1738   case lir_sqrt: __ fsqrtd(dest->as_double_reg(), value->as_double_reg()); break;
1739   case lir_f2hf: __ flt_to_flt16(dest->as_register(), value->as_float_reg(), tmp->as_float_reg()); break;
1740   case lir_hf2f: __ flt16_to_flt(dest->as_float_reg(), value->as_register(), tmp->as_float_reg()); break;
1741   default      : ShouldNotReachHere();
1742   }
1743 }
1744 
1745 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1746 
1747   assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");
1748   Register Rleft = left->is_single_cpu() ? left->as_register() :
1749                                            left->as_register_lo();
  if (dst->is_single_cpu()) {
    Register Rdst = dst->as_register();
    if (right->is_constant()) {
      switch (code) {
        case lir_logic_and: __ andw (Rdst, Rleft, right->as_jint()); break;
        case lir_logic_or:  __ orrw (Rdst, Rleft, right->as_jint()); break;
        case lir_logic_xor: __ eorw (Rdst, Rleft, right->as_jint()); break;
        default: ShouldNotReachHere(); break;
      }
    } else {
      Register Rright = right->is_single_cpu() ? right->as_register() :
                                                 right->as_register_lo();
      switch (code) {
        case lir_logic_and: __ andw (Rdst, Rleft, Rright); break;
        case lir_logic_or:  __ orrw (Rdst, Rleft, Rright); break;
        case lir_logic_xor: __ eorw (Rdst, Rleft, Rright); break;
        default: ShouldNotReachHere(); break;
      }
    }
  } else {
    Register Rdst = dst->as_register_lo();
    if (right->is_constant()) {
      switch (code) {
        case lir_logic_and: __ andr (Rdst, Rleft, right->as_jlong()); break;
        case lir_logic_or:  __ orr (Rdst, Rleft, right->as_jlong()); break;
        case lir_logic_xor: __ eor (Rdst, Rleft, right->as_jlong()); break;
        default: ShouldNotReachHere(); break;
      }
    } else {
      Register Rright = right->is_single_cpu() ? right->as_register() :
                                                 right->as_register_lo();
      switch (code) {
        case lir_logic_and: __ andr (Rdst, Rleft, Rright); break;
        case lir_logic_or:  __ orr (Rdst, Rleft, Rright); break;
        case lir_logic_xor: __ eor (Rdst, Rleft, Rright); break;
        default: ShouldNotReachHere(); break;
      }
    }
  }
1789 }
1790 
1791 
1792 
1793 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr illegal, LIR_Opr result, CodeEmitInfo* info) {
1794 
1795   // opcode check
1796   assert((code == lir_idiv) || (code == lir_irem), "opcode must be idiv or irem");
1797   bool is_irem = (code == lir_irem);
1798 
1799   // operand check
1800   assert(left->is_single_cpu(),   "left must be register");
1801   assert(right->is_single_cpu() || right->is_constant(),  "right must be register or constant");
1802   assert(result->is_single_cpu(), "result must be register");
1803   Register lreg = left->as_register();
1804   Register dreg = result->as_register();
1805 
1806   // power-of-2 constant check and codegen
1807   if (right->is_constant()) {
1808     int c = right->as_constant_ptr()->as_jint();
1809     assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
1810     if (is_irem) {
1811       if (c == 1) {
1812         // move 0 to dreg if divisor is 1
1813         __ movw(dreg, zr);
1814       } else {
1815         // use rscratch1 as intermediate result register
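        // Same trick as the 64-bit remainder above: csnegw gives the
        // remainder the sign of the dividend, using the flags from negsw.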
1816         __ negsw(rscratch1, lreg);
1817         __ andw(dreg, lreg, c - 1);
1818         __ andw(rscratch1, rscratch1, c - 1);
1819         __ csnegw(dreg, dreg, rscratch1, Assembler::MI);
1820       }
1821     } else {
1822       if (c == 1) {
1823         // move lreg to dreg if divisor is 1
1824         __ movw(dreg, lreg);
1825       } else {
1826         unsigned int shift = exact_log2(c);
1827         // use rscratch1 as intermediate result register
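        // Round toward zero by biasing a negative dividend with
        // (2^shift - 1) before the arithmetic shift, as in the 64-bit case.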
1828         __ asrw(rscratch1, lreg, 31);
1829         __ addw(rscratch1, lreg, rscratch1, Assembler::LSR, 32 - shift);
1830         __ asrw(dreg, rscratch1, shift);
1831       }
1832     }
1833   } else {
1834     Register rreg = right->as_register();
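    // AArch64 integer division neither traps on a zero divisor nor on
    // MIN_VALUE / -1 (which wraps to MIN_VALUE, as Java requires), so
    // corrected_idivl can simply use sdiv, deriving the remainder with
    // msub when is_irem is set.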
1835     __ corrected_idivl(dreg, lreg, rreg, is_irem, rscratch1);
1836   }
1837 }
1838 
1839 
1840 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1841   if (opr1->is_constant() && opr2->is_single_cpu()) {
1842     // tableswitch
1843     Register reg = as_reg(opr2);
1844     struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
1845     __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
1846   } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
1847     Register reg1 = as_reg(opr1);
1848     if (opr2->is_single_cpu()) {
1849       // cpu register - cpu register
1850       Register reg2 = opr2->as_register();
1851       if (is_reference_type(opr1->type())) {
1852         __ cmpoop(reg1, reg2);
1853       } else {
1854         assert(!is_reference_type(opr2->type()), "cmp int, oop?");
1855         __ cmpw(reg1, reg2);
1856       }
1857       return;
1858     }
1859     if (opr2->is_double_cpu()) {
1860       // cpu register - cpu register
1861       Register reg2 = opr2->as_register_lo();
1862       __ cmp(reg1, reg2);
1863       return;
1864     }
1865 
1866     if (opr2->is_constant()) {
1867       bool is_32bit = false; // width of register operand
1868       jlong imm;
1869 
1870       switch(opr2->type()) {
1871       case T_INT:
1872         imm = opr2->as_constant_ptr()->as_jint();
1873         is_32bit = true;
1874         break;
1875       case T_LONG:
1876         imm = opr2->as_constant_ptr()->as_jlong();
1877         break;
1878       case T_ADDRESS:
1879         imm = opr2->as_constant_ptr()->as_jint();
1880         break;
1881       case T_METADATA:
1882         imm = (intptr_t)(opr2->as_constant_ptr()->as_metadata());
1883         break;
1884       case T_OBJECT:
1885       case T_ARRAY:
1886         jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
1887         __ cmpoop(reg1, rscratch1);
1888         return;
1889       default:
1890         ShouldNotReachHere();
1891         imm = 0;  // unreachable
1892         break;
1893       }
1894 
1895       if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
1896         if (is_32bit)
1897           __ cmpw(reg1, imm);
1898         else
1899           __ subs(zr, reg1, imm);
1900         return;
1901       } else {
1902         __ mov(rscratch1, imm);
1903         if (is_32bit)
1904           __ cmpw(reg1, rscratch1);
1905         else
1906           __ cmp(reg1, rscratch1);
1907         return;
1908       }
1909     } else
1910       ShouldNotReachHere();
1911   } else if (opr1->is_single_fpu()) {
1912     FloatRegister reg1 = opr1->as_float_reg();
1913     assert(opr2->is_single_fpu(), "expect single float register");
1914     FloatRegister reg2 = opr2->as_float_reg();
1915     __ fcmps(reg1, reg2);
1916   } else if (opr1->is_double_fpu()) {
1917     FloatRegister reg1 = opr1->as_double_reg();
1918     assert(opr2->is_double_fpu(), "expect double float register");
1919     FloatRegister reg2 = opr2->as_double_reg();
1920     __ fcmpd(reg1, reg2);
1921   } else {
1922     ShouldNotReachHere();
1923   }
1924 }
1925 
1926 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
1927   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
1928     bool is_unordered_less = (code == lir_ucmp_fd2i);
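    // float_cmp's second argument is the result to produce for unordered
    // operands (NaN): -1 when an unordered compare should rank as "less".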
1929     if (left->is_single_fpu()) {
1930       __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
1931     } else if (left->is_double_fpu()) {
1932       __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
1933     } else {
1934       ShouldNotReachHere();
1935     }
1936   } else if (code == lir_cmp_l2i) {
1937     Label done;
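    // Produce -1/0/1 for a long compare: start with -1 and keep it when
    // LT; otherwise csinc yields 0 for EQ and zr + 1 = 1 for GT.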
1938     __ cmp(left->as_register_lo(), right->as_register_lo());
1939     __ mov(dst->as_register(), (uint64_t)-1L);
1940     __ br(Assembler::LT, done);
1941     __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
1942     __ bind(done);
1943   } else {
1944     ShouldNotReachHere();
1945   }
1946 }
1947 
1948 
1949 void LIR_Assembler::align_call(LIR_Code code) {  }
1950 
1951 
1952 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
1953   address call = __ trampoline_call(Address(op->addr(), rtype));
1954   if (call == nullptr) {
1955     bailout("trampoline stub overflow");
1956     return;
1957   }
1958   add_call_info(code_offset(), op->info());
1959   __ post_call_nop();
1960 }
1961 
1962 
1963 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
1964   address call = __ ic_call(op->addr());
1965   if (call == nullptr) {
1966     bailout("trampoline stub overflow");
1967     return;
1968   }
1969   add_call_info(code_offset(), op->info());
1970   __ post_call_nop();
1971 }
1972 
1973 void LIR_Assembler::emit_static_call_stub() {
1974   address call_pc = __ pc();
1975   address stub = __ start_a_stub(call_stub_size());
1976   if (stub == nullptr) {
1977     bailout("static call stub overflow");
1978     return;
1979   }
1980 
1981   int start = __ offset();
1982 
1983   __ relocate(static_stub_Relocation::spec(call_pc));
1984   __ emit_static_call_stub();
1985 
1986   assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
1987         <= call_stub_size(), "stub too big");
1988   __ end_a_stub();
1989 }
1990 
1991 
1992 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
1993   assert(exceptionOop->as_register() == r0, "must match");
1994   assert(exceptionPC->as_register() == r3, "must match");
1995 
1996   // exception object is not added to oop map by LinearScan
1997   // (LinearScan assumes that no oops are in fixed registers)
1998   info->add_register_oop(exceptionOop);
1999   StubId unwind_id;
2000 
2001   // get current pc information
  // pc is only needed if the method has an exception handler; the unwind code does not need it.
2003   if (compilation()->debug_info_recorder()->last_pc_offset() == __ offset()) {
2004     // As no instructions have been generated yet for this LIR node it's
2005     // possible that an oop map already exists for the current offset.
    // In that case insert a dummy NOP here to ensure all oop map PCs
2007     // are unique. See JDK-8237483.
2008     __ nop();
2009   }
2010   int pc_for_athrow_offset = __ offset();
2011   InternalAddress pc_for_athrow(__ pc());
2012   __ adr(exceptionPC->as_register(), pc_for_athrow);
2013   add_call_info(pc_for_athrow_offset, info); // for exception handler
2014 
2015   __ verify_not_null_oop(r0);
2016   // search an exception handler (r0: exception oop, r3: throwing pc)
2017   if (compilation()->has_fpu_code()) {
2018     unwind_id = StubId::c1_handle_exception_id;
2019   } else {
2020     unwind_id = StubId::c1_handle_exception_nofpu_id;
2021   }
2022   __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
2023 
2024   // FIXME: enough room for two byte trap   ????
2025   __ nop();
2026 }
2027 
2028 
2029 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
2030   assert(exceptionOop->as_register() == r0, "must match");
2031 
2032   __ b(_unwind_handler_entry);
2033 }
2034 
2035 
2036 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2037   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2038   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2039 
2040   switch (left->type()) {
    case T_INT:
2042       switch (code) {
2043       case lir_shl:  __ lslvw (dreg, lreg, count->as_register()); break;
2044       case lir_shr:  __ asrvw (dreg, lreg, count->as_register()); break;
2045       case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
2046       default:
2047         ShouldNotReachHere();
2048         break;
2049       }
2050       break;
2051     case T_LONG:
2052     case T_ADDRESS:
2053     case T_OBJECT:
2054       switch (code) {
2055       case lir_shl:  __ lslv (dreg, lreg, count->as_register()); break;
2056       case lir_shr:  __ asrv (dreg, lreg, count->as_register()); break;
2057       case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
2058       default:
2059         ShouldNotReachHere();
2060         break;
2061       }
2062       break;
2063     default:
2064       ShouldNotReachHere();
2065       break;
2066     }
2067   }
2068 }
2069 
2070 
2071 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2072   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2073   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2074 
2075   switch (left->type()) {
    case T_INT:
2077       switch (code) {
2078       case lir_shl:  __ lslw (dreg, lreg, count); break;
2079       case lir_shr:  __ asrw (dreg, lreg, count); break;
2080       case lir_ushr: __ lsrw (dreg, lreg, count); break;
2081       default:
2082         ShouldNotReachHere();
2083         break;
2084       }
2085       break;
2086     case T_LONG:
2087     case T_ADDRESS:
2088     case T_OBJECT:
2089       switch (code) {
2090       case lir_shl:  __ lsl (dreg, lreg, count); break;
2091       case lir_shr:  __ asr (dreg, lreg, count); break;
2092       case lir_ushr: __ lsr (dreg, lreg, count); break;
2093       default:
2094         ShouldNotReachHere();
2095         break;
2096       }
2097       break;
2098     default:
2099       ShouldNotReachHere();
2100       break;
2101     }
2102   }
2103 }
2104 
2105 
2106 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2107   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2108   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2109   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2110   __ str (r, Address(sp, offset_from_rsp_in_bytes));
2111 }
2112 
2113 
2114 void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2115   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2116   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2117   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2118   __ mov (rscratch1, c);
2119   __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2120 }
2121 
2122 
2123 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
2124   ShouldNotReachHere();
2125   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2126   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2127   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2128   __ lea(rscratch1, __ constant_oop_address(o));
2129   __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2130 }
2131 
2132 
// This code replaces a call to arraycopy; no exceptions may
// be thrown in this code. They must be thrown in the System.arraycopy
// activation frame; we could save some checks if that were not the case.
2136 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2137   ciArrayKlass* default_type = op->expected_type();
2138   Register src = op->src()->as_register();
2139   Register dst = op->dst()->as_register();
2140   Register src_pos = op->src_pos()->as_register();
2141   Register dst_pos = op->dst_pos()->as_register();
2142   Register length  = op->length()->as_register();
2143   Register tmp = op->tmp()->as_register();
2144 
2145   CodeStub* stub = op->stub();
2146   int flags = op->flags();
2147   BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
2148   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2149 
2150   // if we don't know anything, just go through the generic arraycopy
2151   if (default_type == nullptr // || basic_type == T_OBJECT
2152       ) {
2153     Label done;
2154     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2155 
2156     // Save the arguments in case the generic arraycopy fails and we
2157     // have to fall back to the JNI stub
2158     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2159     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2160     __ str(src,              Address(sp, 4*BytesPerWord));
2161 
2162     address copyfunc_addr = StubRoutines::generic_arraycopy();
2163     assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2164 
2165     // The arguments are in java calling convention so we shift them
2166     // to C convention
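    // Since the Java and C argument registers overlap, each mov below may
    // clobber a register still holding a pending Java argument; the
    // asserts check that the remaining sources never alias the destination.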
2167     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2168     __ mov(c_rarg0, j_rarg0);
2169     assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2170     __ mov(c_rarg1, j_rarg1);
2171     assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
2172     __ mov(c_rarg2, j_rarg2);
2173     assert_different_registers(c_rarg3, j_rarg4);
2174     __ mov(c_rarg3, j_rarg3);
2175     __ mov(c_rarg4, j_rarg4);
2176 #ifndef PRODUCT
2177     if (PrintC1Statistics) {
2178       __ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
2179     }
2180 #endif
2181     __ far_call(RuntimeAddress(copyfunc_addr));
2182 
2183     __ cbz(r0, *stub->continuation());
2184 
2185     // Reload values from the stack so they are where the stub
2186     // expects them.
2187     __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2188     __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2189     __ ldr(src,              Address(sp, 4*BytesPerWord));
2190 
    // r0 is -1^K (i.e. ~K) where K == number of elements already copied
    __ eonw(rscratch1, r0, zr);
    // adjust length down and src/dst pos up by the partial copied count
2194     __ subw(length, length, rscratch1);
2195     __ addw(src_pos, src_pos, rscratch1);
2196     __ addw(dst_pos, dst_pos, rscratch1);
2197     __ b(*stub->entry());
2198 
2199     __ bind(*stub->continuation());
2200     return;
2201   }
2202 
2203   assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2204 
2205   int elem_size = type2aelembytes(basic_type);
2206   int scale = exact_log2(elem_size);
2207 
2208   Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2209   Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2210 
2211   // test for null
2212   if (flags & LIR_OpArrayCopy::src_null_check) {
2213     __ cbz(src, *stub->entry());
2214   }
2215   if (flags & LIR_OpArrayCopy::dst_null_check) {
2216     __ cbz(dst, *stub->entry());
2217   }
2218 
  // If the compiler was not able to prove that the exact type of the
  // source or the destination of the arraycopy is an array type, check
  // at runtime whether the source or the destination is an instance type.
2222   if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2224       __ load_klass(tmp, dst);
2225       __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2226       __ cmpw(rscratch1, Klass::_lh_neutral_value);
2227       __ br(Assembler::GE, *stub->entry());
2228     }
2229 
    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2231       __ load_klass(tmp, src);
2232       __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2233       __ cmpw(rscratch1, Klass::_lh_neutral_value);
2234       __ br(Assembler::GE, *stub->entry());
2235     }
2236   }
2237 
2238   // check if negative
2239   if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
2240     __ cmpw(src_pos, 0);
2241     __ br(Assembler::LT, *stub->entry());
2242   }
2243   if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
2244     __ cmpw(dst_pos, 0);
2245     __ br(Assembler::LT, *stub->entry());
2246   }
2247 
2248   if (flags & LIR_OpArrayCopy::length_positive_check) {
2249     __ cmpw(length, 0);
2250     __ br(Assembler::LT, *stub->entry());
2251   }
2252 
2253   if (flags & LIR_OpArrayCopy::src_range_check) {
2254     __ addw(tmp, src_pos, length);
2255     __ ldrw(rscratch1, src_length_addr);
2256     __ cmpw(tmp, rscratch1);
2257     __ br(Assembler::HI, *stub->entry());
2258   }
2259   if (flags & LIR_OpArrayCopy::dst_range_check) {
2260     __ addw(tmp, dst_pos, length);
2261     __ ldrw(rscratch1, dst_length_addr);
2262     __ cmpw(tmp, rscratch1);
2263     __ br(Assembler::HI, *stub->entry());
2264   }
2265 
2266   if (flags & LIR_OpArrayCopy::type_check) {
2267     // We don't know the array types are compatible
2268     if (basic_type != T_OBJECT) {
2269       // Simple test for basic type arrays
2270       __ cmp_klasses_from_objects(src, dst, tmp, rscratch1);
2271       __ br(Assembler::NE, *stub->entry());
2272     } else {
2273       // For object arrays, if src is a sub class of dst then we can
2274       // safely do the copy.
2275       Label cont, slow;
2276 
2277 #define PUSH(r1, r2)                                    \
2278       stp(r1, r2, __ pre(sp, -2 * wordSize));
2279 
2280 #define POP(r1, r2)                                     \
2281       ldp(r1, r2, __ post(sp, 2 * wordSize));
2282 
2283       __ PUSH(src, dst);
2284 
2285       __ load_klass(src, src);
2286       __ load_klass(dst, dst);
2287 
2288       __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr);
2289 
2290       __ PUSH(src, dst);
2291       __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
2292       __ POP(src, dst);
2293 
2294       __ cbnz(src, cont);
2295 
2296       __ bind(slow);
2297       __ POP(src, dst);
2298 
2299       address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2300       if (copyfunc_addr != nullptr) { // use stub if available
2301         // src is not a sub class of dst so we have to do a
2302         // per-element check.
2303 
2304         int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2305         if ((flags & mask) != mask) {
          // Not both are known to be object arrays; check at runtime
          // that the unknown one actually is one.
2307           assert(flags & mask, "one of the two should be known to be an object array");
2308 
2309           if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2310             __ load_klass(tmp, src);
2311           } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2312             __ load_klass(tmp, dst);
2313           }
2314           int lh_offset = in_bytes(Klass::layout_helper_offset());
2315           Address klass_lh_addr(tmp, lh_offset);
2316           jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2317           __ ldrw(rscratch1, klass_lh_addr);
2318           __ mov(rscratch2, objArray_lh);
2319           __ eorw(rscratch1, rscratch1, rscratch2);
2320           __ cbnzw(rscratch1, *stub->entry());
2321         }
2322 
        // Spill because stubs can use any register they like and it's
        // easier to restore just those that we care about.
2325         __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2326         __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2327         __ str(src,              Address(sp, 4*BytesPerWord));
2328 
2329         __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2330         __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2331         assert_different_registers(c_rarg0, dst, dst_pos, length);
2332         __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2333         __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
2334         assert_different_registers(c_rarg1, dst, length);
2335         __ uxtw(c_rarg2, length);
2336         assert_different_registers(c_rarg2, dst);
2337 
2338         __ load_klass(c_rarg4, dst);
2339         __ ldr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
2340         __ ldrw(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
2341         __ far_call(RuntimeAddress(copyfunc_addr));
2342 
2343 #ifndef PRODUCT
2344         if (PrintC1Statistics) {
2345           Label failed;
2346           __ cbnz(r0, failed);
2347           __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
2348           __ bind(failed);
2349         }
2350 #endif
2351 
2352         __ cbz(r0, *stub->continuation());
2353 
2354 #ifndef PRODUCT
2355         if (PrintC1Statistics) {
2356           __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
2357         }
2358 #endif
2359         assert_different_registers(dst, dst_pos, length, src_pos, src, r0, rscratch1);
2360 
2361         // Restore previously spilled arguments
2362         __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2363         __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2364         __ ldr(src,              Address(sp, 4*BytesPerWord));
2365 
        // return value is -1^K (i.e. ~K) where K is the partial copied count
        __ eonw(rscratch1, r0, zr);
        // adjust length down and src/dst pos up by the partial copied count
2369         __ subw(length, length, rscratch1);
2370         __ addw(src_pos, src_pos, rscratch1);
2371         __ addw(dst_pos, dst_pos, rscratch1);
2372       }
2373 
2374       __ b(*stub->entry());
2375 
2376       __ bind(cont);
2377       __ POP(src, dst);
2378     }
2379   }
2380 
2381 #ifdef ASSERT
2382   if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2383     // Sanity check the known type with the incoming class.  For the
2384     // primitive case the types must match exactly with src.klass and
2385     // dst.klass each exactly matching the default type.  For the
2386     // object array case, if no type check is needed then either the
2387     // dst type is exactly the expected type and the src type is a
2388     // subtype which we can't check or src is the same array as dst
2389     // but not necessarily exactly of type default_type.
2390     Label known_ok, halt;
2391     __ mov_metadata(tmp, default_type->constant_encoding());
2392 
2393     if (basic_type != T_OBJECT) {
2394       __ cmp_klass(dst, tmp, rscratch1);
2395       __ br(Assembler::NE, halt);
2396       __ cmp_klass(src, tmp, rscratch1);
2397       __ br(Assembler::EQ, known_ok);
2398     } else {
2399       __ cmp_klass(dst, tmp, rscratch1);
2400       __ br(Assembler::EQ, known_ok);
2401       __ cmp(src, dst);
2402       __ br(Assembler::EQ, known_ok);
2403     }
2404     __ bind(halt);
2405     __ stop("incorrect type information in arraycopy");
2406     __ bind(known_ok);
2407   }
2408 #endif
2409 
2410 #ifndef PRODUCT
2411   if (PrintC1Statistics) {
2412     __ incrementw(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
2413   }
2414 #endif
2415 
2416   __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2417   __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2418   assert_different_registers(c_rarg0, dst, dst_pos, length);
2419   __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2420   __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
2421   assert_different_registers(c_rarg1, dst, length);
2422   __ uxtw(c_rarg2, length);
2423   assert_different_registers(c_rarg2, dst);
2424 
2425   bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2426   bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
2427   const char *name;
2428   address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2429 
  CodeBlob *cb = CodeCache::find_blob(entry);
  if (cb) {
    __ far_call(RuntimeAddress(entry));
  } else {
    __ call_VM_leaf(entry, 3);
  }
2436 
2437   if (stub != nullptr) {
2438     __ bind(*stub->continuation());
2439   }
2440 }
2441 
2442 
2445 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2446   Register obj = op->obj_opr()->as_register();  // may not be an oop
2447   Register hdr = op->hdr_opr()->as_register();
2448   Register lock = op->lock_opr()->as_register();
2449   Register temp = op->scratch_opr()->as_register();
2450   if (op->code() == lir_lock) {
2451     // add debug info for NullPointerException only if one is possible
2452     int null_check_offset = __ lock_object(hdr, obj, lock, temp, *op->stub()->entry());
2453     if (op->info() != nullptr) {
2454       add_debug_info_for_null_check(null_check_offset, op->info());
2455     }
2456     // done
2457   } else if (op->code() == lir_unlock) {
2458     __ unlock_object(hdr, obj, lock, temp, *op->stub()->entry());
2459   } else {
2460     Unimplemented();
2461   }
2462   __ bind(*op->stub()->continuation());
2463 }
2464 
2465 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
2466   Register obj = op->obj()->as_pointer_register();
2467   Register result = op->result_opr()->as_pointer_register();
2468 
2469   CodeEmitInfo* info = op->info();
2470   if (info != nullptr) {
2471     add_debug_info_for_null_check_here(info);
2472   }
2473 
2474   __ load_klass(result, obj);
2475 }
2476 
2477 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2478   ciMethod* method = op->profiled_method();
2479   int bci          = op->profiled_bci();
2480   ciMethod* callee = op->profiled_callee();
2481 
2482   // Update counter for all call types
2483   ciMethodData* md = method->method_data_or_null();
2484   assert(md != nullptr, "Sanity");
2485   ciProfileData* data = md->bci_to_data(bci);
2486   assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
2487   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
2488   Register mdo  = op->mdo()->as_register();
2489   __ mov_metadata(mdo, md->constant_encoding());
2490   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
2491   // Perform additional virtual call profiling for invokevirtual and
2492   // invokeinterface bytecodes
2493   if (op->should_profile_receiver_type()) {
2494     assert(op->recv()->is_single_cpu(), "recv must be allocated");
2495     Register recv = op->recv()->as_register();
2496     assert_different_registers(mdo, recv);
2497     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
2498     ciKlass* known_klass = op->known_holder();
2499     if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
2500       // We know the type that will be seen at this call site; we can
2501       // statically update the MethodData* rather than needing to do
2502       // dynamic tests on the receiver type.
2503       ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
2504       for (uint i = 0; i < VirtualCallData::row_limit(); i++) {
2505         ciKlass* receiver = vc_data->receiver(i);
2506         if (known_klass->equals(receiver)) {
2507           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
2508           __ addptr(data_addr, DataLayout::counter_increment);
2509           return;
2510         }
2511       }
      // Receiver type not found in the profile data: record the known
      // klass using the generic type profile update below.
2514       __ mov_metadata(recv, known_klass->constant_encoding());
2515     } else {
2516       __ load_klass(recv, recv);
2517     }
2518     type_profile_helper(mdo, md, data, recv);
2519   } else {
2520     // Static call
2521     __ addptr(counter_addr, DataLayout::counter_increment);
2522   }
2523 }
2524 
2525 
2526 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
2527   __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
2528 }
2529 
2530 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
2531   assert(op->crc()->is_single_cpu(),  "crc must be register");
2532   assert(op->val()->is_single_cpu(),  "byte value must be register");
2533   assert(op->result_opr()->is_single_cpu(), "result must be register");
2534   Register crc = op->crc()->as_register();
2535   Register val = op->val()->as_register();
2536   Register res = op->result_opr()->as_register();
2537 
2538   assert_different_registers(val, crc, res);
2539   uint64_t offset;
2540   __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);
2541   __ add(res, res, offset);
2542 
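  // CRC-32 is specified with a pre- and post-inverted accumulator; the
  // caller passes the plain value, so invert around the table-driven update.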
2543   __ mvnw(crc, crc); // ~crc
2544   __ update_byte_crc32(crc, val, res);
2545   __ mvnw(res, crc); // ~crc
2546 }
2547 
2548 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
2549   COMMENT("emit_profile_type {");
2550   Register obj = op->obj()->as_register();
2551   Register tmp = op->tmp()->as_pointer_register();
2552   Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
2553   ciKlass* exact_klass = op->exact_klass();
2554   intptr_t current_klass = op->current_klass();
2555   bool not_null = op->not_null();
2556   bool no_conflict = op->no_conflict();
2557 
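  // A type profile cell holds a Klass* with the TypeEntries flag bits
  // (null_seen, type_unknown) folded into its low bits, so flags are set
  // with plain ORs and klass comparisons mask with type_klass_mask.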
2558   Label update, next, none;
2559 
2560   bool do_null = !not_null;
2561   bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
2562   bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
2563 
2564   assert(do_null || do_update, "why are we here?");
2565   assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
2566   assert(mdo_addr.base() != rscratch1, "wrong register");
2567 
2568   __ verify_oop(obj);
2569 
2570   if (tmp != obj) {
2571     assert_different_registers(obj, tmp, rscratch1, rscratch2, mdo_addr.base(), mdo_addr.index());
2572     __ mov(tmp, obj);
2573   } else {
2574     assert_different_registers(obj, rscratch1, rscratch2, mdo_addr.base(), mdo_addr.index());
2575   }
2576   if (do_null) {
2577     __ cbnz(tmp, update);
2578     if (!TypeEntries::was_null_seen(current_klass)) {
2579       __ ldr(rscratch2, mdo_addr);
2580       __ orr(rscratch2, rscratch2, TypeEntries::null_seen);
2581       __ str(rscratch2, mdo_addr);
2582     }
2583     if (do_update) {
2584 #ifndef ASSERT
2585       __ b(next);
2586     }
2587 #else
2588       __ b(next);
2589     }
2590   } else {
2591     __ cbnz(tmp, update);
2592     __ stop("unexpected null obj");
2593 #endif
2594   }
2595 
2596   __ bind(update);
2597 
2598   if (do_update) {
2599 #ifdef ASSERT
2600     if (exact_klass != nullptr) {
2601       Label ok;
2602       __ load_klass(tmp, tmp);
2603       __ mov_metadata(rscratch1, exact_klass->constant_encoding());
2604       __ eor(rscratch1, tmp, rscratch1);
2605       __ cbz(rscratch1, ok);
2606       __ stop("exact klass and actual klass differ");
2607       __ bind(ok);
2608     }
2609 #endif
2610     if (!no_conflict) {
2611       if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
2612         if (exact_klass != nullptr) {
2613           __ mov_metadata(tmp, exact_klass->constant_encoding());
2614         } else {
2615           __ load_klass(tmp, tmp);
2616         }
2617 
2618         __ ldr(rscratch2, mdo_addr);
2619         __ eor(tmp, tmp, rscratch2);
2620         __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2621         // klass seen before, nothing to do. The unknown bit may have been
2622         // set already but no need to check.
2623         __ cbz(rscratch1, next);
2624 
2625         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2626 
2627         if (TypeEntries::is_type_none(current_klass)) {
2628           __ cbz(rscratch2, none);
2629           __ cmp(rscratch2, (u1)TypeEntries::null_seen);
2630           __ br(Assembler::EQ, none);
          // There is a chance that the checks above fail if another
          // thread has just installed this obj's klass in the profile;
          // re-read after a load barrier and re-check.
2634           __ dmb(Assembler::ISHLD);
2635           __ eor(tmp, tmp, rscratch2); // get back original value before XOR
2636           __ ldr(rscratch2, mdo_addr);
2637           __ eor(tmp, tmp, rscratch2);
2638           __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2639           __ cbz(rscratch1, next);
2640         }
2641       } else {
2642         assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2643                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
2644 
2645         __ ldr(tmp, mdo_addr);
2646         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2647       }
2648 
      // Different from the klass seen before: cannot keep an accurate profile.
2650       __ ldr(rscratch2, mdo_addr);
2651       __ orr(rscratch2, rscratch2, TypeEntries::type_unknown);
2652       __ str(rscratch2, mdo_addr);
2653 
2654       if (TypeEntries::is_type_none(current_klass)) {
2655         __ b(next);
2656 
2657         __ bind(none);
2658         // first time here. Set profile type.
2659         __ str(tmp, mdo_addr);
2660 #ifdef ASSERT
2661         __ andr(tmp, tmp, TypeEntries::type_mask);
2662         __ verify_klass_ptr(tmp);
2663 #endif
2664       }
2665     } else {
2666       // There's a single possible klass at this profile point
2667       assert(exact_klass != nullptr, "should be");
2668       if (TypeEntries::is_type_none(current_klass)) {
2669         __ mov_metadata(tmp, exact_klass->constant_encoding());
2670         __ ldr(rscratch2, mdo_addr);
2671         __ eor(tmp, tmp, rscratch2);
2672         __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2673         __ cbz(rscratch1, next);
2674 #ifdef ASSERT
2675         {
2676           Label ok;
2677           __ ldr(rscratch1, mdo_addr);
2678           __ cbz(rscratch1, ok);
2679           __ cmp(rscratch1, (u1)TypeEntries::null_seen);
2680           __ br(Assembler::EQ, ok);
2681           // may have been set by another thread
2682           __ dmb(Assembler::ISHLD);
2683           __ mov_metadata(rscratch1, exact_klass->constant_encoding());
2684           __ ldr(rscratch2, mdo_addr);
2685           __ eor(rscratch2, rscratch1, rscratch2);
2686           __ andr(rscratch2, rscratch2, TypeEntries::type_mask);
2687           __ cbz(rscratch2, ok);
2688 
2689           __ stop("unexpected profiling mismatch");
2690           __ bind(ok);
2691         }
2692 #endif
2693         // first time here. Set profile type.
2694         __ str(tmp, mdo_addr);
2695 #ifdef ASSERT
2696         __ andr(tmp, tmp, TypeEntries::type_mask);
2697         __ verify_klass_ptr(tmp);
2698 #endif
2699       } else {
2700         assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2701                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2702 
2703         __ ldr(tmp, mdo_addr);
2704         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2705 
2706         __ orr(tmp, tmp, TypeEntries::type_unknown);
2707         __ str(tmp, mdo_addr);
2708         // FIXME: Write barrier needed here?
2709       }
2710     }
2711 
2712     __ bind(next);
2713   }
2714   COMMENT("} emit_profile_type");
2715 }
2716 
2717 
2718 void LIR_Assembler::align_backward_branch_target() {
2719 }
2720 
2721 
2722 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2723   // tmp must be unused
2724   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2725 
2726   if (left->is_single_cpu()) {
2727     assert(dest->is_single_cpu(), "expect single result reg");
2728     __ negw(dest->as_register(), left->as_register());
2729   } else if (left->is_double_cpu()) {
2730     assert(dest->is_double_cpu(), "expect double result reg");
2731     __ neg(dest->as_register_lo(), left->as_register_lo());
2732   } else if (left->is_single_fpu()) {
2733     assert(dest->is_single_fpu(), "expect single float result reg");
2734     __ fnegs(dest->as_float_reg(), left->as_float_reg());
2735   } else {
2736     assert(left->is_double_fpu(), "expect double float operand reg");
2737     assert(dest->is_double_fpu(), "expect double float result reg");
2738     __ fnegd(dest->as_double_reg(), left->as_double_reg());
2739   }
2740 }
2741 
2742 
2743 void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
2744   if (patch_code != lir_patch_none) {
2745     deoptimize_trap(info);
2746     return;
2747   }
2748 
2749   __ lea(dest->as_pointer_register(), as_Address(addr->as_address_ptr()));
2750 }
2751 
2752 
2753 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
2754   assert(!tmp->is_valid(), "don't need temporary");
2755 
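  // Targets inside the code cache are within far_call's reach; anything
  // else gets an absolute address in rscratch1 and an indirect branch.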
2756   CodeBlob *cb = CodeCache::find_blob(dest);
2757   if (cb) {
2758     __ far_call(RuntimeAddress(dest));
2759   } else {
2760     __ mov(rscratch1, RuntimeAddress(dest));
2761     __ blr(rscratch1);
2762   }
2763 
2764   if (info != nullptr) {
2765     add_call_info_here(info);
2766   }
2767   __ post_call_nop();
2768 }
2769 
2770 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
2771   if (dest->is_address() || src->is_address()) {
2772     move_op(src, dest, type, lir_patch_none, info, /*wide*/false);
2773   } else {
2774     ShouldNotReachHere();
2775   }
2776 }
2777 
2778 #ifdef ASSERT
2779 // emit run-time assertion
2780 void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
2781   assert(op->code() == lir_assert, "must be");
2782 
2783   if (op->in_opr1()->is_valid()) {
2784     assert(op->in_opr2()->is_valid(), "both operands must be valid");
2785     comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
2786   } else {
2787     assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
2788     assert(op->condition() == lir_cond_always, "no other conditions allowed");
2789   }
2790 
2791   Label ok;
2792   if (op->condition() != lir_cond_always) {
2793     Assembler::Condition acond = Assembler::AL;
2794     switch (op->condition()) {
2795       case lir_cond_equal:        acond = Assembler::EQ;  break;
2796       case lir_cond_notEqual:     acond = Assembler::NE;  break;
2797       case lir_cond_less:         acond = Assembler::LT;  break;
2798       case lir_cond_lessEqual:    acond = Assembler::LE;  break;
2799       case lir_cond_greaterEqual: acond = Assembler::GE;  break;
2800       case lir_cond_greater:      acond = Assembler::GT;  break;
2801       case lir_cond_belowEqual:   acond = Assembler::LS;  break;
2802       case lir_cond_aboveEqual:   acond = Assembler::HS;  break;
2803       default:                    ShouldNotReachHere();
2804     }
2805     __ br(acond, ok);
2806   }
2807   if (op->halt()) {
2808     const char* str = __ code_string(op->msg());
2809     __ stop(str);
2810   } else {
2811     breakpoint();
2812   }
2813   __ bind(ok);
2814 }
2815 #endif
2816 
2817 #ifndef PRODUCT
2818 #define COMMENT(x)   do { __ block_comment(x); } while (0)
2819 #else
2820 #define COMMENT(x)
2821 #endif
2822 
2823 void LIR_Assembler::membar() {
2824   COMMENT("membar");
2825   __ membar(MacroAssembler::AnyAny);
2826 }
2827 
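// Acquire keeps later loads and stores from floating above a prior load;
// release keeps earlier loads and stores from sinking below a later store.
// Each maps onto the two-barrier combination emitted below.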
2828 void LIR_Assembler::membar_acquire() {
2829   __ membar(Assembler::LoadLoad|Assembler::LoadStore);
2830 }
2831 
2832 void LIR_Assembler::membar_release() {
2833   __ membar(Assembler::LoadStore|Assembler::StoreStore);
2834 }
2835 
2836 void LIR_Assembler::membar_loadload() {
2837   __ membar(Assembler::LoadLoad);
2838 }
2839 
2840 void LIR_Assembler::membar_storestore() {
2841   __ membar(MacroAssembler::StoreStore);
2842 }
2843 
2844 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
2845 
2846 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
2847 
2848 void LIR_Assembler::on_spin_wait() {
2849   __ spin_wait();
2850 }
2851 
2852 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
2853   __ mov(result_reg->as_register(), rthread);
2854 }
2855 
2856 
2857 void LIR_Assembler::peephole(LIR_List *lir) {
2858 #if 0
2859   if (tableswitch_count >= max_tableswitches)
2860     return;
2861 
2862   /*
2863     This finite-state automaton recognizes sequences of compare-and-
2864     branch instructions.  We will turn them into a tableswitch.  You
2865     could argue that C1 really shouldn't be doing this sort of
2866     optimization, but without it the code is really horrible.
2867   */
2868 
2869   enum { start_s, cmp1_s, beq_s, cmp_s } state;
2870   int first_key, last_key = -2147483648;
2871   int next_key = 0;
2872   int start_insn = -1;
2873   int last_insn = -1;
2874   Register reg = noreg;
2875   LIR_Opr reg_opr;
2876   state = start_s;
2877 
2878   LIR_OpList* inst = lir->instructions_list();
2879   for (int i = 0; i < inst->length(); i++) {
2880     LIR_Op* op = inst->at(i);
2881     switch (state) {
2882     case start_s:
2883       first_key = -1;
2884       start_insn = i;
2885       switch (op->code()) {
2886       case lir_cmp:
2887         LIR_Opr opr1 = op->as_Op2()->in_opr1();
2888         LIR_Opr opr2 = op->as_Op2()->in_opr2();
2889         if (opr1->is_cpu_register() && opr1->is_single_cpu()
2890             && opr2->is_constant()
2891             && opr2->type() == T_INT) {
2892           reg_opr = opr1;
2893           reg = opr1->as_register();
2894           first_key = opr2->as_constant_ptr()->as_jint();
2895           next_key = first_key + 1;
2896           state = cmp_s;
2897           goto next_state;
2898         }
2899         break;
2900       }
2901       break;
2902     case cmp_s:
2903       switch (op->code()) {
2904       case lir_branch:
2905         if (op->as_OpBranch()->cond() == lir_cond_equal) {
2906           state = beq_s;
2907           last_insn = i;
2908           goto next_state;
2909         }
2910       }
2911       state = start_s;
2912       break;
2913     case beq_s:
2914       switch (op->code()) {
2915       case lir_cmp: {
2916         LIR_Opr opr1 = op->as_Op2()->in_opr1();
2917         LIR_Opr opr2 = op->as_Op2()->in_opr2();
2918         if (opr1->is_cpu_register() && opr1->is_single_cpu()
2919             && opr1->as_register() == reg
2920             && opr2->is_constant()
2921             && opr2->type() == T_INT
2922             && opr2->as_constant_ptr()->as_jint() == next_key) {
2923           last_key = next_key;
2924           next_key++;
2925           state = cmp_s;
2926           goto next_state;
2927         }
2928       }
2929       }
2930       last_key = next_key;
2931       state = start_s;
2932       break;
2933     default:
2934       assert(false, "impossible state");
2935     }
2936     if (state == start_s) {
2937       if (first_key < last_key - 5L && reg != noreg) {
2938         {
2939           // printf("found run register %d starting at insn %d low value %d high value %d\n",
2940           //        reg->encoding(),
2941           //        start_insn, first_key, last_key);
2942           //   for (int i = 0; i < inst->length(); i++) {
2943           //     inst->at(i)->print();
2944           //     tty->print("\n");
2945           //   }
2946           //   tty->print("\n");
2947         }
2948 
        struct tableswitch* sw = &switches[tableswitch_count];
        sw->_insn_index = start_insn;
        sw->_first_key = first_key;
        sw->_last_key = last_key;
        sw->_reg = reg;
2952         inst->insert_before(last_insn + 1, new LIR_OpLabel(&sw->_after));
        {
          // Insert the new table of branches; offset walks backwards over
          // the original cmp/branch pairs to pick up their target labels.
          int offset = last_insn;
          for (int n = first_key; n < last_key; n++) {
            inst->insert_before(last_insn + 1,
                                new LIR_OpBranch(lir_cond_always, T_ILLEGAL,
                                                 inst->at(offset)->as_OpBranch()->label()));
            offset -= 2;  // step back over one cmp/branch pair
            i++;          // account for the instruction just inserted
          }
        }
2964         // Delete all the old compare-and-branch instructions
2965         for (int n = first_key; n < last_key; n++) {
2966           inst->remove_at(start_insn);
2967           inst->remove_at(start_insn);
2968         }
2969         // Insert the tableswitch instruction
2970         inst->insert_before(start_insn,
2971                             new LIR_Op2(lir_cmp, lir_cond_always,
2972                                         LIR_OprFact::intConst(tableswitch_count),
2973                                         reg_opr));
2974         inst->insert_before(start_insn + 1, new LIR_OpLabel(&sw->_branches));
2975         tableswitch_count++;
2976       }
2977       reg = noreg;
      last_key = min_jint;
2979     }
2980   next_state:
2981     ;
2982   }
2983 #endif
2984 }
2985 
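// lir_xadd: atomic fetch-and-add; lir_xchg: atomic exchange. The operand
// width is chosen from the value type: the 32-bit helpers serve T_INT and
// compressed oops, the 64-bit helpers serve T_LONG and uncompressed oops.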
2986 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
2987   Address addr = as_Address(src->as_address_ptr());
2988   BasicType type = src->type();
2989   bool is_oop = is_reference_type(type);
2990 
2991   void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
2992   void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);
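
  // Pick the right-width MacroAssembler helper once, via member-function
  // pointers, so the lir_xadd/lir_xchg dispatch below is width-agnostic.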
2993 
2994   switch(type) {
2995   case T_INT:
2996     xchg = &MacroAssembler::atomic_xchgalw;
2997     add = &MacroAssembler::atomic_addalw;
2998     break;
2999   case T_LONG:
3000     xchg = &MacroAssembler::atomic_xchgal;
3001     add = &MacroAssembler::atomic_addal;
3002     break;
3003   case T_OBJECT:
3004   case T_ARRAY:
3005     if (UseCompressedOops) {
3006       xchg = &MacroAssembler::atomic_xchgalw;
3007       add = &MacroAssembler::atomic_addalw;
3008     } else {
3009       xchg = &MacroAssembler::atomic_xchgal;
3010       add = &MacroAssembler::atomic_addal;
3011     }
3012     break;
3013   default:
3014     ShouldNotReachHere();
3015     xchg = &MacroAssembler::atomic_xchgal;
3016     add = &MacroAssembler::atomic_addal; // unreachable
3017   }
3018 
3019   switch (code) {
3020   case lir_xadd:
3021     {
3022       RegisterOrConstant inc;
3023       Register tmp = as_reg(tmp_op);
3024       Register dst = as_reg(dest);
3025       if (data->is_constant()) {
3026         inc = RegisterOrConstant(as_long(data));
3027         assert_different_registers(dst, addr.base(), tmp,
3028                                    rscratch1, rscratch2);
3029       } else {
3030         inc = RegisterOrConstant(as_reg(data));
3031         assert_different_registers(inc.as_register(), dst, addr.base(), tmp,
3032                                    rscratch1, rscratch2);
3033       }
3034       __ lea(tmp, addr);
3035       (_masm->*add)(dst, inc, tmp);
3036       break;
3037     }
3038   case lir_xchg:
3039     {
3040       Register tmp = tmp_op->as_register();
3041       Register obj = as_reg(data);
3042       Register dst = as_reg(dest);
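      // With compressed oops the new value is narrowed into rscratch2 before
      // the exchange, and the (narrow) result is widened again afterwards.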
3043       if (is_oop && UseCompressedOops) {
3044         __ encode_heap_oop(rscratch2, obj);
3045         obj = rscratch2;
3046       }
3047       assert_different_registers(obj, addr.base(), tmp, rscratch1);
3048       assert_different_registers(dst, addr.base(), tmp, rscratch1);
3049       __ lea(tmp, addr);
3050       (_masm->*xchg)(dst, obj, tmp);
3051       if (is_oop && UseCompressedOops) {
3052         __ decode_heap_oop(dst);
3053       }
3054     }
3055     break;
3056   default:
3057     ShouldNotReachHere();
3058   }
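  // Without LSE the helpers above are LL/SC loops built from acquire/release
  // exclusives, which do not by themselves act as a full two-way fence; the
  // trailing barrier conservatively restores full-fence semantics.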
  if (!UseLSE) {
3060     __ membar(__ AnyAny);
3061   }
3062 }
3063 
3064 #undef __