1 /*
   2  * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
   4  * Copyright 2026 Arm Limited and/or its affiliates.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "asm/assembler.hpp"
  29 #include "c1/c1_CodeStubs.hpp"
  30 #include "c1/c1_Compilation.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_MacroAssembler.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArrayKlass.hpp"

  36 #include "ci/ciInstance.hpp"

  37 #include "code/aotCodeCache.hpp"
  38 #include "code/compiledIC.hpp"
  39 #include "gc/shared/collectedHeap.hpp"
  40 #include "gc/shared/gc_globals.hpp"
  41 #include "nativeInst_aarch64.hpp"
  42 #include "oops/objArrayKlass.hpp"

  43 #include "runtime/frame.inline.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "runtime/stubRoutines.hpp"
  46 #include "runtime/threadIdentifier.hpp"
  47 #include "utilities/powerOfTwo.hpp"
  48 #include "vmreg_aarch64.inline.hpp"
  49 
  50 
  51 #ifndef PRODUCT
  52 #define COMMENT(x)   do { __ block_comment(x); } while (0)
  53 #else
  54 #define COMMENT(x)
  55 #endif
  56 
  57 NEEDS_CLEANUP // remove this definitions ?
  58 const Register SYNC_header = r0;   // synchronization header
  59 const Register SHIFT_count = r0;   // where count for shift operations must be
  60 
  61 #define __ _masm->
  62 
  63 
  64 static void select_different_registers(Register preserve,
  65                                        Register extra,
  66                                        Register &tmp1,
  67                                        Register &tmp2,
  68                                        Register &tmp3) {
  69   if (tmp1 == preserve) {
  70     assert_different_registers(tmp1, tmp2, tmp3, extra);
  71     tmp1 = extra;
  72   } else if (tmp2 == preserve) {
  73     assert_different_registers(tmp1, tmp2, tmp3, extra);
  74     tmp2 = extra;
  75   } else if (tmp3 == preserve) {
  76     assert_different_registers(tmp1, tmp2, tmp3, extra);
  77     tmp3 = extra;
  78   }
  79   assert_different_registers(preserve, tmp1, tmp2, tmp3);
  80 }
  81 
  82 
  83 bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }
  84 
  85 
  86 LIR_Opr LIR_Assembler::receiverOpr() {
  87   return FrameMap::receiver_opr;
  88 }
  89 
  90 LIR_Opr LIR_Assembler::osrBufferPointer() {
  91   return FrameMap::as_pointer_opr(receiverOpr()->as_register());
  92 }
  93 
  94 //--------------fpu register translations-----------------------
  95 
  96 
  97 address LIR_Assembler::float_constant(float f) {
  98   address const_addr = __ float_constant(f);
  99   if (const_addr == nullptr) {
 100     bailout("const section overflow");
 101     return __ code()->consts()->start();
 102   } else {
 103     return const_addr;
 104   }
 105 }
 106 
 107 
 108 address LIR_Assembler::double_constant(double d) {
 109   address const_addr = __ double_constant(d);
 110   if (const_addr == nullptr) {
 111     bailout("const section overflow");
 112     return __ code()->consts()->start();
 113   } else {
 114     return const_addr;
 115   }
 116 }
 117 
 118 address LIR_Assembler::int_constant(jlong n) {
 119   address const_addr = __ long_constant(n);
 120   if (const_addr == nullptr) {
 121     bailout("const section overflow");
 122     return __ code()->consts()->start();
 123   } else {
 124     return const_addr;
 125   }
 126 }
 127 
 128 void LIR_Assembler::breakpoint() { Unimplemented(); }
 129 
 130 void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }
 131 
 132 void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }
 133 
 134 bool LIR_Assembler::is_literal_address(LIR_Address* addr) { Unimplemented(); return false; }
 135 //-------------------------------------------
 136 
 137 static Register as_reg(LIR_Opr op) {
 138   return op->is_double_cpu() ? op->as_register_lo() : op->as_register();
 139 }
 140 
 141 static jlong as_long(LIR_Opr data) {
 142   jlong result;
 143   switch (data->type()) {
 144   case T_INT:
 145     result = (data->as_jint());
 146     break;
 147   case T_LONG:
 148     result = (data->as_jlong());
 149     break;
 150   default:
 151     ShouldNotReachHere();
 152     result = 0;  // unreachable
 153   }
 154   return result;
 155 }
 156 
 157 Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
 158   Register base = addr->base()->as_pointer_register();
 159   LIR_Opr opr = addr->index();
 160   if (opr->is_cpu_register()) {
 161     Register index;
 162     if (opr->is_single_cpu())
 163       index = opr->as_register();
 164     else
 165       index = opr->as_register_lo();
 166     assert(addr->disp() == 0, "must be");
 167     switch(opr->type()) {
 168       case T_INT:
 169         return Address(base, index, Address::sxtw(addr->scale()));
 170       case T_LONG:
 171         return Address(base, index, Address::lsl(addr->scale()));
 172       default:
 173         ShouldNotReachHere();
 174       }
 175   } else {
 176     assert(addr->scale() == 0,
 177            "expected for immediate operand, was: %d", addr->scale());
 178     ptrdiff_t offset = ptrdiff_t(addr->disp());
 179     // NOTE: Does not handle any 16 byte vector access.
 180     const uint type_size = type2aelembytes(addr->type(), true);
 181     return __ legitimize_address(Address(base, offset), type_size, tmp);
 182   }
 183   return Address();
 184 }
 185 
 186 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
 187   ShouldNotReachHere();
 188   return Address();
 189 }
 190 
 191 Address LIR_Assembler::as_Address(LIR_Address* addr) {
 192   return as_Address(addr, rscratch1);
 193 }
 194 
 195 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
 196   return as_Address(addr, rscratch1);  // Ouch
 197   // FIXME: This needs to be much more clever.  See x86.
 198 }
 199 
 200 // Ensure a valid Address (base + offset) to a stack-slot. If stack access is
 201 // not encodable as a base + (immediate) offset, generate an explicit address
 202 // calculation to hold the address in a temporary register.
 203 Address LIR_Assembler::stack_slot_address(int index, uint size, Register tmp, int adjust) {
 204   precond(size == 4 || size == 8);
 205   Address addr = frame_map()->address_for_slot(index, adjust);
 206   precond(addr.getMode() == Address::base_plus_offset);
 207   precond(addr.base() == sp);
 208   precond(addr.offset() > 0);
 209   uint mask = size - 1;
 210   assert((addr.offset() & mask) == 0, "scaled offsets only");
 211   return __ legitimize_address(addr, size, tmp);
 212 }
 213 
 214 void LIR_Assembler::osr_entry() {
 215   offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
 216   BlockBegin* osr_entry = compilation()->hir()->osr_entry();
 217   ValueStack* entry_state = osr_entry->state();
 218   int number_of_locks = entry_state->locks_size();
 219 
 220   // we jump here if osr happens with the interpreter
 221   // state set up to continue at the beginning of the
 222   // loop that triggered osr - in particular, we have
 223   // the following registers setup:
 224   //
 225   // r2: osr buffer
 226   //
 227 
 228   // build frame
 229   ciMethod* m = compilation()->method();
 230   __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
 231 
 232   // OSR buffer is
 233   //
 234   // locals[nlocals-1..0]
 235   // monitors[0..number_of_locks]
 236   //
 237   // locals is a direct copy of the interpreter frame so in the osr buffer
 238   // so first slot in the local array is the last local from the interpreter
 239   // and last slot is local[0] (receiver) from the interpreter
 240   //
 241   // Similarly with locks. The first lock slot in the osr buffer is the nth lock
 242   // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
 243   // in the interpreter frame (the method lock if a sync method)
 244 
 245   // Initialize monitors in the compiled activation.
 246   //   r2: pointer to osr buffer
 247   //
 248   // All other registers are dead at this point and the locals will be
 249   // copied into place by code emitted in the IR.
 250 
 251   Register OSR_buf = osrBufferPointer()->as_pointer_register();
 252   { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
 253     int monitor_offset = BytesPerWord * method()->max_locals() +
 254       (2 * BytesPerWord) * (number_of_locks - 1);
 255     // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
 256     // the OSR buffer using 2 word entries: first the lock and then
 257     // the oop.
 258     for (int i = 0; i < number_of_locks; i++) {
 259       int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
 260 #ifdef ASSERT
 261       // verify the interpreter's monitor has a non-null object
 262       {
 263         Label L;
 264         __ ldr(rscratch1, __ form_address(rscratch1, OSR_buf, slot_offset + 1*BytesPerWord, 0));
 265         __ cbnz(rscratch1, L);
 266         __ stop("locked object is null");
 267         __ bind(L);
 268       }
 269 #endif
 270       __ ldr(r19, __ form_address(rscratch1, OSR_buf, slot_offset, 0));
 271       __ ldr(r20, __ form_address(rscratch1, OSR_buf, slot_offset + BytesPerWord, 0));
 272       __ str(r19, frame_map()->address_for_monitor_lock(i));
 273       __ str(r20, frame_map()->address_for_monitor_object(i));
 274     }
 275   }
 276 }
 277 
 278 
 279 // inline cache check; done before the frame is built.
 280 int LIR_Assembler::check_icache() {
 281   return __ ic_check(CodeEntryAlignment);
 282 }
 283 
 284 void LIR_Assembler::clinit_barrier(ciMethod* method) {
 285   assert(VM_Version::supports_fast_class_init_checks(), "sanity");
 286   assert(!method->holder()->is_not_initialized(), "initialization should have been started");
 287 
 288   Label L_skip_barrier;
 289 
 290   __ mov_metadata(rscratch2, method->holder()->constant_encoding());
 291   __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier /*L_fast_path*/);
 292   __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 293   __ bind(L_skip_barrier);
 294 }
 295 
 296 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
 297   if (o == nullptr) {
 298     __ mov(reg, zr);
 299   } else {
 300     __ movoop(reg, o);
 301   }
 302 }
 303 
 304 void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
 305   address target = nullptr;
 306   relocInfo::relocType reloc_type = relocInfo::none;
 307 
 308   switch (patching_id(info)) {
 309   case PatchingStub::access_field_id:
 310     target = Runtime1::entry_for(StubId::c1_access_field_patching_id);
 311     reloc_type = relocInfo::section_word_type;
 312     break;
 313   case PatchingStub::load_klass_id:
 314     target = Runtime1::entry_for(StubId::c1_load_klass_patching_id);
 315     reloc_type = relocInfo::metadata_type;
 316     break;
 317   case PatchingStub::load_mirror_id:
 318     target = Runtime1::entry_for(StubId::c1_load_mirror_patching_id);
 319     reloc_type = relocInfo::oop_type;
 320     break;
 321   case PatchingStub::load_appendix_id:
 322     target = Runtime1::entry_for(StubId::c1_load_appendix_patching_id);
 323     reloc_type = relocInfo::oop_type;
 324     break;
 325   default: ShouldNotReachHere();
 326   }
 327 
 328   __ far_call(RuntimeAddress(target));
 329   add_call_info_here(info);
 330 }
 331 
 332 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
 333   deoptimize_trap(info);
 334 }
 335 
 336 
 337 // This specifies the rsp decrement needed to build the frame
 338 int LIR_Assembler::initial_frame_size_in_bytes() const {
 339   // if rounding, must let FrameMap know!
 340 
 341   return in_bytes(frame_map()->framesize_in_bytes());
 342 }
 343 
 344 
 345 int LIR_Assembler::emit_exception_handler() {
 346   // generate code for exception handler
 347   address handler_base = __ start_a_stub(exception_handler_size());
 348   if (handler_base == nullptr) {
 349     // not enough space left for the handler
 350     bailout("exception handler overflow");
 351     return -1;
 352   }
 353 
 354   int offset = code_offset();
 355 
 356   // the exception oop and pc are in r0, and r3
 357   // no other registers need to be preserved, so invalidate them
 358   __ invalidate_registers(false, true, true, false, true, true);
 359 
 360   // check that there is really an exception
 361   __ verify_not_null_oop(r0);
 362 
 363   // search an exception handler (r0: exception oop, r3: throwing pc)
 364   __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_handle_exception_from_callee_id)));
 365   __ should_not_reach_here();
 366   guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
 367   __ end_a_stub();
 368 
 369   return offset;
 370 }
 371 
 372 
 373 // Emit the code to remove the frame from the stack in the exception
 374 // unwind path.
 375 int LIR_Assembler::emit_unwind_handler() {
 376 #ifndef PRODUCT
 377   if (CommentedAssembly) {
 378     _masm->block_comment("Unwind handler");
 379   }
 380 #endif
 381 
 382   int offset = code_offset();
 383 
 384   // Fetch the exception from TLS and clear out exception related thread state
 385   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
 386   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
 387   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
 388 
 389   __ bind(_unwind_handler_entry);
 390   __ verify_not_null_oop(r0);
 391   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 392     __ mov(r19, r0);  // Preserve the exception
 393   }
 394 
 395   // Perform needed unlocking
 396   MonitorExitStub* stub = nullptr;
 397   if (method()->is_synchronized()) {
 398     monitor_address(0, FrameMap::r0_opr);
 399     stub = new MonitorExitStub(FrameMap::r0_opr, 0);
 400     __ unlock_object(r5, r4, r0, r6, *stub->entry());
 401     __ bind(*stub->continuation());
 402   }
 403 
 404   if (compilation()->env()->dtrace_method_probes()) {
 405     __ mov(c_rarg0, rthread);
 406     __ mov_metadata(c_rarg1, method()->constant_encoding());
 407     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
 408   }
 409 
 410   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 411     __ mov(r0, r19);  // Restore the exception
 412   }
 413 
 414   // remove the activation and dispatch to the unwind handler
 415   __ block_comment("remove_frame and dispatch to the unwind handler");
 416   __ remove_frame(initial_frame_size_in_bytes());
 417   __ far_jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_unwind_exception_id)));
 418 
 419   // Emit the slow path assembly
 420   if (stub != nullptr) {
 421     stub->emit_code(this);
 422   }
 423 
 424   return offset;
 425 }
 426 
 427 
 428 int LIR_Assembler::emit_deopt_handler() {
 429   // generate code for exception handler
 430   address handler_base = __ start_a_stub(deopt_handler_size());
 431   if (handler_base == nullptr) {
 432     // not enough space left for the handler
 433     bailout("deopt handler overflow");
 434     return -1;
 435   }
 436 
 437   int offset = code_offset();
 438 
 439   Label start;
 440   __ bind(start);
 441 
 442   __ far_call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 443 
 444   int entry_offset = __ offset();
 445   __ b(start);
 446 
 447   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 448   assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
 449          "out of bounds read in post-call NOP check");
 450   __ end_a_stub();
 451 
 452   return entry_offset;
 453 }
 454 
 455 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
 456   _masm->code_section()->relocate(adr, relocInfo::poll_type);
 457   int pc_offset = code_offset();
 458   flush_debug_info(pc_offset);
 459   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
 460   if (info->exception_handlers() != nullptr) {
 461     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
 462   }
 463 }
 464 
 465 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 466   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
 467 










































 468   // Pop the stack before the safepoint code
 469   __ remove_frame(initial_frame_size_in_bytes());
 470 
 471   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 472     __ reserved_stack_check();
 473   }
 474 
 475   code_stub->set_safepoint_offset(__ offset());
 476   __ relocate(relocInfo::poll_return_type);
 477   __ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
 478   __ ret(lr);
 479 }
 480 




 481 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 482   guarantee(info != nullptr, "Shouldn't be null");
 483   __ get_polling_page(rscratch1, relocInfo::poll_type);
 484   add_debug_info_for_branch(info);  // This isn't just debug info:
 485                                     // it's the oop map
 486   __ read_polling_page(rscratch1, relocInfo::poll_type);
 487   return __ offset();
 488 }
 489 
 490 
 491 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
 492   if (from_reg == r31_sp)
 493     from_reg = sp;
 494   if (to_reg == r31_sp)
 495     to_reg = sp;
 496   __ mov(to_reg, from_reg);
 497 }
 498 
 499 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
 500 
 501 
 502 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
 503   assert(src->is_constant(), "should not call otherwise");
 504   assert(dest->is_register(), "should not call otherwise");
 505   LIR_Const* c = src->as_constant_ptr();
 506 
 507   switch (c->type()) {
 508     case T_INT: {
 509       assert(patch_code == lir_patch_none, "no patching handled here");
 510       __ movw(dest->as_register(), c->as_jint());
 511       break;
 512     }
 513 
 514     case T_ADDRESS: {
 515       assert(patch_code == lir_patch_none, "no patching handled here");
 516       __ mov(dest->as_register(), c->as_jint());
 517       break;
 518     }
 519 
 520     case T_LONG: {
 521       assert(patch_code == lir_patch_none, "no patching handled here");
 522 #if INCLUDE_CDS
 523       if (AOTCodeCache::is_on_for_dump()) {
 524         address b = c->as_pointer();
 525         if (b == (address)ThreadIdentifier::unsafe_offset()) {
 526           __ lea(dest->as_register_lo(), ExternalAddress(b));
 527           break;
 528         }
 529         if (AOTRuntimeConstants::contains(b)) {
 530           __ load_aotrc_address(dest->as_register_lo(), b);
 531           break;
 532         }
 533       }
 534 #endif
 535       __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
 536       break;
 537     }
 538 
 539     case T_OBJECT: {
 540         if (patch_code == lir_patch_none) {
 541           jobject2reg(c->as_jobject(), dest->as_register());
 542         } else {
 543           jobject2reg_with_patching(dest->as_register(), info);


 544         }
 545       break;
 546     }
 547 
 548     case T_METADATA: {
 549       if (patch_code != lir_patch_none) {
 550         klass2reg_with_patching(dest->as_register(), info);
 551       } else {
 552         __ mov_metadata(dest->as_register(), c->as_metadata());
 553       }
 554       break;
 555     }
 556 
 557     case T_FLOAT: {
 558       if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
 559         __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
 560       } else {
 561         __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
 562         __ ldrs(dest->as_float_reg(), Address(rscratch1));
 563       }
 564       break;
 565     }
 566 
 567     case T_DOUBLE: {
 568       if (__ operand_valid_for_float_immediate(c->as_jdouble())) {
 569         __ fmovd(dest->as_double_reg(), (c->as_jdouble()));
 570       } else {
 571         __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
 572         __ ldrd(dest->as_double_reg(), Address(rscratch1));
 573       }
 574       break;
 575     }
 576 
 577     default:
 578       ShouldNotReachHere();
 579   }
 580 }
 581 
 582 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 583   LIR_Const* c = src->as_constant_ptr();
 584   switch (c->type()) {
 585   case T_OBJECT:
 586     {
 587       if (! c->as_jobject())
 588         __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
 589       else {
 590         const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, nullptr);
 591         reg2stack(FrameMap::rscratch1_opr, dest, c->type());
 592       }
 593     }
 594     break;
 595   case T_ADDRESS:
 596     {
 597       const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, nullptr);
 598       reg2stack(FrameMap::rscratch1_opr, dest, c->type());
 599     }
 600   case T_INT:
 601   case T_FLOAT:
 602     {
 603       Register reg = zr;
 604       if (c->as_jint_bits() == 0)
 605         __ strw(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
 606       else {
 607         __ movw(rscratch1, c->as_jint_bits());
 608         __ strw(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix()));
 609       }
 610     }
 611     break;
 612   case T_LONG:
 613   case T_DOUBLE:
 614     {
 615       Register reg = zr;
 616       if (c->as_jlong_bits() == 0)
 617         __ str(zr, frame_map()->address_for_slot(dest->double_stack_ix(),
 618                                                  lo_word_offset_in_bytes));
 619       else {
 620         __ mov(rscratch1, (intptr_t)c->as_jlong_bits());
 621         __ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(),
 622                                                         lo_word_offset_in_bytes));
 623       }
 624     }
 625     break;
 626   default:
 627     ShouldNotReachHere();
 628   }
 629 }
 630 
 631 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 632   assert(src->is_constant(), "should not call otherwise");
 633   LIR_Const* c = src->as_constant_ptr();
 634   LIR_Address* to_addr = dest->as_address_ptr();
 635 
 636   void (Assembler::* insn)(Register Rt, const Address &adr);
 637 
 638   switch (type) {
 639   case T_ADDRESS:
 640     assert(c->as_jint() == 0, "should be");
 641     insn = &Assembler::str;
 642     break;
 643   case T_LONG:
 644     assert(c->as_jlong() == 0, "should be");
 645     insn = &Assembler::str;
 646     break;
 647   case T_INT:
 648     assert(c->as_jint() == 0, "should be");
 649     insn = &Assembler::strw;
 650     break;
 651   case T_OBJECT:
 652   case T_ARRAY:
 653     assert(c->as_jobject() == nullptr, "should be");
 654     if (UseCompressedOops && !wide) {
 655       insn = &Assembler::strw;
 656     } else {
 657       insn = &Assembler::str;
 658     }
 659     break;
 660   case T_CHAR:
 661   case T_SHORT:
 662     assert(c->as_jint() == 0, "should be");
 663     insn = &Assembler::strh;
 664     break;
 665   case T_BOOLEAN:
 666   case T_BYTE:
 667     assert(c->as_jint() == 0, "should be");
 668     insn = &Assembler::strb;
 669     break;
 670   default:
 671     ShouldNotReachHere();
 672     insn = &Assembler::str;  // unreachable
 673   }
 674 
 675   if (info) add_debug_info_for_null_check_here(info);
 676   (_masm->*insn)(zr, as_Address(to_addr, rscratch1));
 677 }
 678 
 679 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 680   assert(src->is_register(), "should not call otherwise");
 681   assert(dest->is_register(), "should not call otherwise");
 682 
 683   // move between cpu-registers
 684   if (dest->is_single_cpu()) {
 685     if (src->type() == T_LONG) {
 686       // Can do LONG -> OBJECT
 687       move_regs(src->as_register_lo(), dest->as_register());
 688       return;
 689     }
 690     assert(src->is_single_cpu(), "must match");
 691     if (src->type() == T_OBJECT) {
 692       __ verify_oop(src->as_register());
 693     }
 694     move_regs(src->as_register(), dest->as_register());
 695 
 696   } else if (dest->is_double_cpu()) {
 697     if (is_reference_type(src->type())) {
 698       // Surprising to me but we can see move of a long to t_object
 699       __ verify_oop(src->as_register());
 700       move_regs(src->as_register(), dest->as_register_lo());
 701       return;
 702     }
 703     assert(src->is_double_cpu(), "must match");
 704     Register f_lo = src->as_register_lo();
 705     Register f_hi = src->as_register_hi();
 706     Register t_lo = dest->as_register_lo();
 707     Register t_hi = dest->as_register_hi();
 708     assert(f_hi == f_lo, "must be same");
 709     assert(t_hi == t_lo, "must be same");
 710     move_regs(f_lo, t_lo);
 711 
 712   } else if (dest->is_single_fpu()) {
 713     __ fmovs(dest->as_float_reg(), src->as_float_reg());
 714 
 715   } else if (dest->is_double_fpu()) {
 716     __ fmovd(dest->as_double_reg(), src->as_double_reg());
 717 
 718   } else {
 719     ShouldNotReachHere();
 720   }
 721 }
 722 
 723 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
 724   precond(src->is_register() && dest->is_stack());
 725 
 726   uint const c_sz32 = sizeof(uint32_t);
 727   uint const c_sz64 = sizeof(uint64_t);
 728 
 729   if (src->is_single_cpu()) {
 730     int index = dest->single_stack_ix();
 731     if (is_reference_type(type)) {
 732       __ str(src->as_register(), stack_slot_address(index, c_sz64, rscratch1));
 733       __ verify_oop(src->as_register());
 734     } else if (type == T_METADATA || type == T_DOUBLE || type == T_ADDRESS) {
 735       __ str(src->as_register(), stack_slot_address(index, c_sz64, rscratch1));
 736     } else {
 737       __ strw(src->as_register(), stack_slot_address(index, c_sz32, rscratch1));
 738     }
 739 
 740   } else if (src->is_double_cpu()) {
 741     int index = dest->double_stack_ix();
 742     Address dest_addr_LO = stack_slot_address(index, c_sz64, rscratch1, lo_word_offset_in_bytes);
 743     __ str(src->as_register_lo(), dest_addr_LO);
 744 
 745   } else if (src->is_single_fpu()) {
 746     int index = dest->single_stack_ix();
 747     __ strs(src->as_float_reg(), stack_slot_address(index, c_sz32, rscratch1));
 748 
 749   } else if (src->is_double_fpu()) {
 750     int index = dest->double_stack_ix();
 751     __ strd(src->as_double_reg(), stack_slot_address(index, c_sz64, rscratch1));
 752 
 753   } else {
 754     ShouldNotReachHere();
 755   }
 756 }
 757 
 758 
 759 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
 760   LIR_Address* to_addr = dest->as_address_ptr();
 761   PatchingStub* patch = nullptr;
 762   Register compressed_src = rscratch1;
 763 
 764   if (patch_code != lir_patch_none) {
 765     deoptimize_trap(info);
 766     return;
 767   }
 768 
 769   if (is_reference_type(type)) {
 770     __ verify_oop(src->as_register());
 771 
 772     if (UseCompressedOops && !wide) {
 773       __ encode_heap_oop(compressed_src, src->as_register());
 774     } else {
 775       compressed_src = src->as_register();
 776     }
 777   }
 778 
 779   int null_check_here = code_offset();
 780   switch (type) {
 781     case T_FLOAT: {
 782       __ strs(src->as_float_reg(), as_Address(to_addr));
 783       break;
 784     }
 785 
 786     case T_DOUBLE: {
 787       __ strd(src->as_double_reg(), as_Address(to_addr));
 788       break;
 789     }
 790 
 791     case T_ARRAY:   // fall through
 792     case T_OBJECT:  // fall through
 793       if (UseCompressedOops && !wide) {
 794         __ strw(compressed_src, as_Address(to_addr, rscratch2));
 795       } else {
 796          __ str(compressed_src, as_Address(to_addr));
 797       }
 798       break;
 799     case T_METADATA:
 800       // We get here to store a method pointer to the stack to pass to
 801       // a dtrace runtime call. This can't work on 64 bit with
 802       // compressed klass ptrs: T_METADATA can be a compressed klass
 803       // ptr or a 64 bit method pointer.
 804       ShouldNotReachHere();
 805       __ str(src->as_register(), as_Address(to_addr));
 806       break;
 807     case T_ADDRESS:
 808       __ str(src->as_register(), as_Address(to_addr));
 809       break;
 810     case T_INT:
 811       __ strw(src->as_register(), as_Address(to_addr));
 812       break;
 813 
 814     case T_LONG: {
 815       __ str(src->as_register_lo(), as_Address_lo(to_addr));
 816       break;
 817     }
 818 
 819     case T_BYTE:    // fall through
 820     case T_BOOLEAN: {
 821       __ strb(src->as_register(), as_Address(to_addr));
 822       break;
 823     }
 824 
 825     case T_CHAR:    // fall through
 826     case T_SHORT:
 827       __ strh(src->as_register(), as_Address(to_addr));
 828       break;
 829 
 830     default:
 831       ShouldNotReachHere();
 832   }
 833   if (info != nullptr) {
 834     add_debug_info_for_null_check(null_check_here, info);
 835   }
 836 }
 837 
 838 
 839 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
 840   precond(src->is_stack() && dest->is_register());
 841 
 842   uint const c_sz32 = sizeof(uint32_t);
 843   uint const c_sz64 = sizeof(uint64_t);
 844 
 845   if (dest->is_single_cpu()) {
 846     int index = src->single_stack_ix();
 847     if (is_reference_type(type)) {
 848       __ ldr(dest->as_register(), stack_slot_address(index, c_sz64, rscratch1));
 849       __ verify_oop(dest->as_register());
 850     } else if (type == T_METADATA || type == T_ADDRESS) {
 851       __ ldr(dest->as_register(), stack_slot_address(index, c_sz64, rscratch1));
 852     } else {
 853       __ ldrw(dest->as_register(), stack_slot_address(index, c_sz32, rscratch1));
 854     }
 855 
 856   } else if (dest->is_double_cpu()) {
 857     int index = src->double_stack_ix();
 858     Address src_addr_LO = stack_slot_address(index, c_sz64, rscratch1, lo_word_offset_in_bytes);
 859     __ ldr(dest->as_register_lo(), src_addr_LO);
 860 
 861   } else if (dest->is_single_fpu()) {
 862     int index = src->single_stack_ix();
 863     __ ldrs(dest->as_float_reg(), stack_slot_address(index, c_sz32, rscratch1));
 864 
 865   } else if (dest->is_double_fpu()) {
 866     int index = src->double_stack_ix();
 867     __ ldrd(dest->as_double_reg(), stack_slot_address(index, c_sz64, rscratch1));
 868 
 869   } else {
 870     ShouldNotReachHere();
 871   }
 872 }
 873 
 874 
 875 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
 876   address target = nullptr;
 877   relocInfo::relocType reloc_type = relocInfo::none;
 878 
 879   switch (patching_id(info)) {
 880   case PatchingStub::access_field_id:
 881     target = Runtime1::entry_for(StubId::c1_access_field_patching_id);
 882     reloc_type = relocInfo::section_word_type;
 883     break;
 884   case PatchingStub::load_klass_id:
 885     target = Runtime1::entry_for(StubId::c1_load_klass_patching_id);
 886     reloc_type = relocInfo::metadata_type;
 887     break;
 888   case PatchingStub::load_mirror_id:
 889     target = Runtime1::entry_for(StubId::c1_load_mirror_patching_id);
 890     reloc_type = relocInfo::oop_type;
 891     break;
 892   case PatchingStub::load_appendix_id:
 893     target = Runtime1::entry_for(StubId::c1_load_appendix_patching_id);
 894     reloc_type = relocInfo::oop_type;
 895     break;
 896   default: ShouldNotReachHere();
 897   }
 898 
 899   __ far_call(RuntimeAddress(target));
 900   add_call_info_here(info);
 901 }
 902 
 903 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
 904 
 905   LIR_Opr temp;
 906   if (type == T_LONG || type == T_DOUBLE)
 907     temp = FrameMap::rscratch1_long_opr;
 908   else
 909     temp = FrameMap::rscratch1_opr;
 910 
 911   stack2reg(src, temp, src->type());
 912   reg2stack(temp, dest, dest->type());
 913 }
 914 
 915 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type,
 916                             LIR_PatchCode patch_code, CodeEmitInfo* info,
 917                             bool wide) {
 918   mem2reg(src, dest, type, patch_code, info, wide, false);
 919 }
 920 
 921 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type,
 922                             LIR_PatchCode patch_code, CodeEmitInfo* info,
 923                             bool wide, bool is_volatile) {
 924   LIR_Address* addr = src->as_address_ptr();
 925   LIR_Address* from_addr = src->as_address_ptr();
 926 
 927   if (addr->base()->type() == T_OBJECT) {
 928     __ verify_oop(addr->base()->as_pointer_register());
 929   }
 930 
 931   if (patch_code != lir_patch_none) {
 932     deoptimize_trap(info);
 933     return;
 934   }
 935 
 936   if (is_volatile) {
 937     load_volatile(from_addr, dest, type, info);
 938   } else {
 939     load_unordered(from_addr, dest, type, wide, info);
 940   }
 941 
 942   if (is_reference_type(type)) {
 943     if (UseCompressedOops && !wide) {
 944       __ decode_heap_oop(dest->as_register());
 945     }
 946 
 947     __ verify_oop(dest->as_register());
 948   }
 949 }
 950 
 951 void LIR_Assembler::load_unordered(LIR_Address *from_addr, LIR_Opr dest,
 952                                    BasicType type, bool wide, CodeEmitInfo* info) {
 953   if (info != nullptr) {
 954     add_debug_info_for_null_check_here(info);
 955   }
 956 
 957   switch (type) {
 958     case T_FLOAT: {
 959       __ ldrs(dest->as_float_reg(), as_Address(from_addr));
 960       break;
 961     }
 962 
 963     case T_DOUBLE: {
 964       __ ldrd(dest->as_double_reg(), as_Address(from_addr));
 965       break;
 966     }
 967 
 968     case T_ARRAY:   // fall through
 969     case T_OBJECT:  // fall through
 970       if (UseCompressedOops && !wide) {
 971         __ ldrw(dest->as_register(), as_Address(from_addr));
 972       } else {
 973         __ ldr(dest->as_register(), as_Address(from_addr));
 974       }
 975       break;
 976     case T_METADATA:
 977       // We get here to store a method pointer to the stack to pass to
 978       // a dtrace runtime call. This can't work on 64 bit with
 979       // compressed klass ptrs: T_METADATA can be a compressed klass
 980       // ptr or a 64 bit method pointer.
 981       ShouldNotReachHere();
 982       __ ldr(dest->as_register(), as_Address(from_addr));
 983       break;
 984     case T_ADDRESS:
 985       __ ldr(dest->as_register(), as_Address(from_addr));
 986       break;
 987     case T_INT:
 988       __ ldrw(dest->as_register(), as_Address(from_addr));
 989       break;
 990 
 991     case T_LONG: {
 992       __ ldr(dest->as_register_lo(), as_Address_lo(from_addr));
 993       break;
 994     }
 995 
 996     case T_BYTE:
 997       __ ldrsb(dest->as_register(), as_Address(from_addr));
 998       break;
 999     case T_BOOLEAN: {
1000       __ ldrb(dest->as_register(), as_Address(from_addr));
1001       break;
1002     }
1003 
1004     case T_CHAR:
1005       __ ldrh(dest->as_register(), as_Address(from_addr));
1006       break;
1007     case T_SHORT:
1008       __ ldrsh(dest->as_register(), as_Address(from_addr));
1009       break;
1010 
1011     default:
1012       ShouldNotReachHere();
1013   }
1014 }
1015 















1016 void LIR_Assembler::load_volatile(LIR_Address *from_addr, LIR_Opr dest,
1017                                   BasicType type, CodeEmitInfo* info) {
1018   __ lea(rscratch1, as_Address(from_addr));
1019 
1020   Register dest_reg = rscratch2;
1021   if (!is_floating_point_type(type)) {
1022     dest_reg = (dest->is_single_cpu()
1023                 ? dest->as_register() : dest->as_register_lo());
1024   }
1025 
1026   if (info != nullptr) {
1027     add_debug_info_for_null_check_here(info);
1028   }
1029 
1030   // Uses LDAR to ensure memory ordering.
1031   __ load_store_volatile(dest_reg, type, rscratch1, /*is_load*/true);
1032 
1033   switch (type) {
1034     // LDAR is unsigned so need to sign-extend for byte and short
1035     case T_BYTE:
1036       __ sxtb(dest_reg, dest_reg);
1037       break;
1038     case T_SHORT:
1039       __ sxth(dest_reg, dest_reg);
1040       break;
1041     // need to move from GPR to FPR after LDAR with FMOV for floating types
1042     case T_FLOAT:
1043       __ fmovs(dest->as_float_reg(), dest_reg);
1044       break;
1045     case T_DOUBLE:
1046       __ fmovd(dest->as_double_reg(), dest_reg);
1047       break;
1048     default:
1049       break;
1050   }
1051 }
1052 
1053 int LIR_Assembler::array_element_size(BasicType type) const {
1054   int elem_size = type2aelembytes(type);
1055   return exact_log2(elem_size);
1056 }
1057 
1058 
1059 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1060   switch (op->code()) {
1061   case lir_idiv:
1062   case lir_irem:
1063     arithmetic_idiv(op->code(),
1064                     op->in_opr1(),
1065                     op->in_opr2(),
1066                     op->in_opr3(),
1067                     op->result_opr(),
1068                     op->info());
1069     break;
1070   case lir_fmad:
1071     __ fmaddd(op->result_opr()->as_double_reg(),
1072               op->in_opr1()->as_double_reg(),
1073               op->in_opr2()->as_double_reg(),
1074               op->in_opr3()->as_double_reg());
1075     break;
1076   case lir_fmaf:
1077     __ fmadds(op->result_opr()->as_float_reg(),
1078               op->in_opr1()->as_float_reg(),
1079               op->in_opr2()->as_float_reg(),
1080               op->in_opr3()->as_float_reg());
1081     break;
1082   default:      ShouldNotReachHere(); break;
1083   }
1084 }
1085 
1086 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
1087 #ifdef ASSERT
1088   assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
1089   if (op->block() != nullptr)  _branch_target_blocks.append(op->block());
1090   if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
1091 #endif
1092 
1093   if (op->cond() == lir_cond_always) {
1094     if (op->info() != nullptr) add_debug_info_for_branch(op->info());
1095     __ b(*(op->label()));
1096   } else {
1097     Assembler::Condition acond;
1098     if (op->code() == lir_cond_float_branch) {
1099       bool is_unordered = (op->ublock() == op->block());
1100       // Assembler::EQ does not permit unordered branches, so we add
1101       // another branch here.  Likewise, Assembler::NE does not permit
1102       // ordered branches.
1103       if ((is_unordered && op->cond() == lir_cond_equal)
1104           || (!is_unordered && op->cond() == lir_cond_notEqual))
1105         __ br(Assembler::VS, *(op->ublock()->label()));
1106       switch(op->cond()) {
1107       case lir_cond_equal:        acond = Assembler::EQ; break;
1108       case lir_cond_notEqual:     acond = Assembler::NE; break;
1109       case lir_cond_less:         acond = (is_unordered ? Assembler::LT : Assembler::LO); break;
1110       case lir_cond_lessEqual:    acond = (is_unordered ? Assembler::LE : Assembler::LS); break;
1111       case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break;
1112       case lir_cond_greater:      acond = (is_unordered ? Assembler::HI : Assembler::GT); break;
1113       default:                    ShouldNotReachHere();
1114         acond = Assembler::EQ;  // unreachable
1115       }
1116     } else {
1117       switch (op->cond()) {
1118         case lir_cond_equal:        acond = Assembler::EQ; break;
1119         case lir_cond_notEqual:     acond = Assembler::NE; break;
1120         case lir_cond_less:         acond = Assembler::LT; break;
1121         case lir_cond_lessEqual:    acond = Assembler::LE; break;
1122         case lir_cond_greaterEqual: acond = Assembler::GE; break;
1123         case lir_cond_greater:      acond = Assembler::GT; break;
1124         case lir_cond_belowEqual:   acond = Assembler::LS; break;
1125         case lir_cond_aboveEqual:   acond = Assembler::HS; break;
1126         default:                    ShouldNotReachHere();
1127           acond = Assembler::EQ;  // unreachable
1128       }
1129     }
1130     __ br(acond,*(op->label()));
1131   }
1132 }
1133 
1134 
1135 
1136 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
1137   LIR_Opr src  = op->in_opr();
1138   LIR_Opr dest = op->result_opr();
1139 
1140   switch (op->bytecode()) {
1141     case Bytecodes::_i2f:
1142       {
1143         __ scvtfws(dest->as_float_reg(), src->as_register());
1144         break;
1145       }
1146     case Bytecodes::_i2d:
1147       {
1148         __ scvtfwd(dest->as_double_reg(), src->as_register());
1149         break;
1150       }
1151     case Bytecodes::_l2d:
1152       {
1153         __ scvtfd(dest->as_double_reg(), src->as_register_lo());
1154         break;
1155       }
1156     case Bytecodes::_l2f:
1157       {
1158         __ scvtfs(dest->as_float_reg(), src->as_register_lo());
1159         break;
1160       }
1161     case Bytecodes::_f2d:
1162       {
1163         __ fcvts(dest->as_double_reg(), src->as_float_reg());
1164         break;
1165       }
1166     case Bytecodes::_d2f:
1167       {
1168         __ fcvtd(dest->as_float_reg(), src->as_double_reg());
1169         break;
1170       }
1171     case Bytecodes::_i2c:
1172       {
1173         __ ubfx(dest->as_register(), src->as_register(), 0, 16);
1174         break;
1175       }
1176     case Bytecodes::_i2l:
1177       {
1178         __ sxtw(dest->as_register_lo(), src->as_register());
1179         break;
1180       }
1181     case Bytecodes::_i2s:
1182       {
1183         __ sxth(dest->as_register(), src->as_register());
1184         break;
1185       }
1186     case Bytecodes::_i2b:
1187       {
1188         __ sxtb(dest->as_register(), src->as_register());
1189         break;
1190       }
1191     case Bytecodes::_l2i:
1192       {
1193         _masm->block_comment("FIXME: This could be a no-op");
1194         __ uxtw(dest->as_register(), src->as_register_lo());
1195         break;
1196       }
1197     case Bytecodes::_d2l:
1198       {
1199         __ fcvtzd(dest->as_register_lo(), src->as_double_reg());
1200         break;
1201       }
1202     case Bytecodes::_f2i:
1203       {
1204         __ fcvtzsw(dest->as_register(), src->as_float_reg());
1205         break;
1206       }
1207     case Bytecodes::_f2l:
1208       {
1209         __ fcvtzs(dest->as_register_lo(), src->as_float_reg());
1210         break;
1211       }
1212     case Bytecodes::_d2i:
1213       {
1214         __ fcvtzdw(dest->as_register(), src->as_double_reg());
1215         break;
1216       }
1217     default: ShouldNotReachHere();
1218   }
1219 }
1220 
1221 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
1222   if (op->init_check()) {
1223     __ lea(rscratch1, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
1224     __ ldarb(rscratch1, rscratch1);
1225     __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1226     add_debug_info_for_null_check_here(op->stub()->info());
1227     __ br(Assembler::NE, *op->stub()->entry());
1228   }
1229   __ allocate_object(op->obj()->as_register(),
1230                      op->tmp1()->as_register(),
1231                      op->tmp2()->as_register(),
1232                      op->header_size(),
1233                      op->object_size(),
1234                      op->klass()->as_register(),
1235                      *op->stub()->entry());
1236   __ bind(*op->stub()->continuation());
1237 }
1238 
1239 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1240   Register len =  op->len()->as_register();
1241   __ uxtw(len, len);
1242 
1243   if (UseSlowPath ||
1244       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1245       (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
1246     __ b(*op->stub()->entry());
1247   } else {
1248     Register tmp1 = op->tmp1()->as_register();
1249     Register tmp2 = op->tmp2()->as_register();
1250     Register tmp3 = op->tmp3()->as_register();
1251     if (len == tmp1) {
1252       tmp1 = tmp3;
1253     } else if (len == tmp2) {
1254       tmp2 = tmp3;
1255     } else if (len == tmp3) {
1256       // everything is ok
1257     } else {
1258       __ mov(tmp3, len);
1259     }
1260     __ allocate_array(op->obj()->as_register(),
1261                       len,
1262                       tmp1,
1263                       tmp2,
1264                       arrayOopDesc::base_offset_in_bytes(op->type()),
1265                       array_element_size(op->type()),
1266                       op->klass()->as_register(),
1267                       *op->stub()->entry(),
1268                       op->zero_array());
1269   }
1270   __ bind(*op->stub()->continuation());
1271 }
1272 
1273 void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md,
1274                                         ciProfileData *data, Register recv) {
1275 
1276   int mdp_offset = md->byte_offset_of_slot(data, in_ByteSize(0));
1277   __ profile_receiver_type(recv, mdo, mdp_offset);
1278 }
1279 
1280 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
1281   // we always need a stub for the failure case.
1282   CodeStub* stub = op->stub();
1283   Register obj = op->object()->as_register();
1284   Register k_RInfo = op->tmp1()->as_register();
1285   Register klass_RInfo = op->tmp2()->as_register();
1286   Register dst = op->result_opr()->as_register();
1287   ciKlass* k = op->klass();
1288   Register Rtmp1 = noreg;
1289 
1290   // check if it needs to be profiled
1291   ciMethodData* md;
1292   ciProfileData* data;
1293 
1294   const bool should_profile = op->should_profile();
1295 
1296   if (should_profile) {
1297     ciMethod* method = op->profiled_method();
1298     assert(method != nullptr, "Should have method");
1299     int bci = op->profiled_bci();
1300     md = method->method_data_or_null();
1301     assert(md != nullptr, "Sanity");
1302     data = md->bci_to_data(bci);
1303     assert(data != nullptr,                "need data for type check");
1304     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1305   }
1306   Label* success_target = success;
1307   Label* failure_target = failure;
1308 
1309   if (obj == k_RInfo) {
1310     k_RInfo = dst;
1311   } else if (obj == klass_RInfo) {
1312     klass_RInfo = dst;
1313   }
1314 
1315   Rtmp1 = op->tmp3()->as_register();
1316   select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1317 
1318   assert_different_registers(obj, k_RInfo, klass_RInfo);
1319 
1320   if (should_profile) {
1321     Register mdo  = klass_RInfo;
1322     __ mov_metadata(mdo, md->constant_encoding());
1323     Label not_null;
1324     __ cbnz(obj, not_null);
1325     // Object is null; update MDO and exit
1326     Address data_addr
1327       = __ form_address(rscratch2, mdo,
1328                         md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1329                         0);
1330     __ ldrb(rscratch1, data_addr);
1331     __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1332     __ strb(rscratch1, data_addr);
1333     __ b(*obj_is_null);
1334     __ bind(not_null);
1335 
1336     Register recv = k_RInfo;
1337     __ load_klass(recv, obj);
1338     type_profile_helper(mdo, md, data, recv);
1339   } else {
1340     __ cbz(obj, *obj_is_null);


1341   }
1342 
1343   if (!k->is_loaded()) {
1344     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1345   } else {
1346     __ mov_metadata(k_RInfo, k->constant_encoding());
1347   }
1348   __ verify_oop(obj);
1349 
1350   if (op->fast_check()) {

1351     // get object class
1352     // not a safepoint as obj null check happens earlier
1353     __ load_klass(rscratch1, obj);
1354     __ cmp( rscratch1, k_RInfo);
1355 
1356     __ br(Assembler::NE, *failure_target);
1357     // successful cast, fall through to profile or jump
1358   } else {
1359     // get object class
1360     // not a safepoint as obj null check happens earlier
1361     __ load_klass(klass_RInfo, obj);
1362     if (k->is_loaded()) {
1363       // See if we get an immediate positive hit
1364       __ ldr(rscratch1, Address(klass_RInfo, int64_t(k->super_check_offset())));
1365       __ cmp(k_RInfo, rscratch1);
1366       if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1367         __ br(Assembler::NE, *failure_target);
1368         // successful cast, fall through to profile or jump
1369       } else {
1370         // See if we get an immediate positive hit
1371         __ br(Assembler::EQ, *success_target);
1372         // check for self
1373         __ cmp(klass_RInfo, k_RInfo);







1374         __ br(Assembler::EQ, *success_target);
1375 
1376         __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1377         __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1378         __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1379         // result is a boolean
1380         __ cbzw(klass_RInfo, *failure_target);
1381         // successful cast, fall through to profile or jump
1382       }
1383     } else {
1384       // perform the fast part of the checking logic
1385       __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
1386       // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1387       __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1388       __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1389       __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1390       // result is a boolean
1391       __ cbz(k_RInfo, *failure_target);
1392       // successful cast, fall through to profile or jump
1393     }
1394   }
1395   __ b(*success);
1396 }
1397 
1398 
1399 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
1400   const bool should_profile = op->should_profile();
1401 
1402   LIR_Code code = op->code();
1403   if (code == lir_store_check) {
1404     Register value = op->object()->as_register();
1405     Register array = op->array()->as_register();
1406     Register k_RInfo = op->tmp1()->as_register();
1407     Register klass_RInfo = op->tmp2()->as_register();
1408     Register Rtmp1 = op->tmp3()->as_register();
1409 
1410     CodeStub* stub = op->stub();
1411 
1412     // check if it needs to be profiled
1413     ciMethodData* md;
1414     ciProfileData* data;
1415 
1416     if (should_profile) {
1417       ciMethod* method = op->profiled_method();
1418       assert(method != nullptr, "Should have method");
1419       int bci = op->profiled_bci();
1420       md = method->method_data_or_null();
1421       assert(md != nullptr, "Sanity");
1422       data = md->bci_to_data(bci);
1423       assert(data != nullptr,                "need data for type check");
1424       assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1425     }
1426     Label done;
1427     Label* success_target = &done;
1428     Label* failure_target = stub->entry();
1429 
1430     if (should_profile) {
1431       Label not_null;
1432       Register mdo  = klass_RInfo;
1433       __ mov_metadata(mdo, md->constant_encoding());
1434       __ cbnz(value, not_null);
1435       // Object is null; update MDO and exit
1436       Address data_addr
1437         = __ form_address(rscratch2, mdo,
1438                           md->byte_offset_of_slot(data, DataLayout::flags_offset()), 0);
1439       __ ldrb(rscratch1, data_addr);
1440       __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1441       __ strb(rscratch1, data_addr);
1442       __ b(done);
1443       __ bind(not_null);
1444 
1445       Register recv = k_RInfo;
1446       __ load_klass(recv, value);
1447       type_profile_helper(mdo, md, data, recv);
1448     } else {
1449       __ cbz(value, done);
1450     }
1451 
1452     add_debug_info_for_null_check_here(op->info_for_exception());
1453     __ load_klass(k_RInfo, array);
1454     __ load_klass(klass_RInfo, value);
1455 
1456     // get instance klass (it's already uncompressed)
1457     __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
1458     // perform the fast part of the checking logic
1459     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
1460     // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1461     __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1462     __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1463     __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1464     // result is a boolean
1465     __ cbzw(k_RInfo, *failure_target);
1466     // fall through to the success case
1467 
1468     __ bind(done);
1469   } else if (code == lir_checkcast) {
1470     Register obj = op->object()->as_register();
1471     Register dst = op->result_opr()->as_register();
1472     Label success;
1473     emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
1474     __ bind(success);
1475     if (dst != obj) {
1476       __ mov(dst, obj);
1477     }
1478   } else if (code == lir_instanceof) {
1479     Register obj = op->object()->as_register();
1480     Register dst = op->result_opr()->as_register();
1481     Label success, failure, done;
1482     emit_typecheck_helper(op, &success, &failure, &failure);
1483     __ bind(failure);
1484     __ mov(dst, zr);
1485     __ b(done);
1486     __ bind(success);
1487     __ mov(dst, 1);
1488     __ bind(done);
1489   } else {
1490     ShouldNotReachHere();
1491   }
1492 }
1493 






























































































1494 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1495   __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1496   __ cset(rscratch1, Assembler::NE);
1497   __ membar(__ AnyAny);
1498 }
1499 
1500 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1501   __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1502   __ cset(rscratch1, Assembler::NE);
1503   __ membar(__ AnyAny);
1504 }
1505 
1506 
1507 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1508   Register addr;
1509   if (op->addr()->is_register()) {
1510     addr = as_reg(op->addr());
1511   } else {
1512     assert(op->addr()->is_address(), "what else?");
1513     LIR_Address* addr_ptr = op->addr()->as_address_ptr();
1514     assert(addr_ptr->disp() == 0, "need 0 disp");
1515     assert(addr_ptr->index() == LIR_Opr::illegalOpr(), "need 0 index");
1516     addr = as_reg(addr_ptr->base());
1517   }
1518   Register newval = as_reg(op->new_value());
1519   Register cmpval = as_reg(op->cmp_value());
1520 
1521   if (op->code() == lir_cas_obj) {
1522     if (UseCompressedOops) {
1523       Register t1 = op->tmp1()->as_register();
1524       assert(op->tmp1()->is_valid(), "must be");
1525       __ encode_heap_oop(t1, cmpval);
1526       cmpval = t1;
1527       __ encode_heap_oop(rscratch2, newval);
1528       newval = rscratch2;
1529       casw(addr, newval, cmpval);
1530     } else {
1531       casl(addr, newval, cmpval);
1532     }
1533   } else if (op->code() == lir_cas_int) {
1534     casw(addr, newval, cmpval);
1535   } else {
1536     casl(addr, newval, cmpval);
1537   }
1538 }
1539 
1540 
1541 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
1542                           LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
1543   assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on aarch64");
1544 
1545   Assembler::Condition acond, ncond;
1546   switch (condition) {
1547   case lir_cond_equal:        acond = Assembler::EQ; ncond = Assembler::NE; break;
1548   case lir_cond_notEqual:     acond = Assembler::NE; ncond = Assembler::EQ; break;
1549   case lir_cond_less:         acond = Assembler::LT; ncond = Assembler::GE; break;
1550   case lir_cond_lessEqual:    acond = Assembler::LE; ncond = Assembler::GT; break;
1551   case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;
1552   case lir_cond_greater:      acond = Assembler::GT; ncond = Assembler::LE; break;
1553   case lir_cond_belowEqual:
1554   case lir_cond_aboveEqual:
1555   default:                    ShouldNotReachHere();
1556     acond = Assembler::EQ; ncond = Assembler::NE;  // unreachable
1557   }
1558 
1559   assert(result->is_single_cpu() || result->is_double_cpu(),
1560          "expect single register for result");
1561   if (opr1->is_constant() && opr2->is_constant()
1562       && opr1->type() == T_INT && opr2->type() == T_INT) {
1563     jint val1 = opr1->as_jint();
1564     jint val2 = opr2->as_jint();
1565     if (val1 == 0 && val2 == 1) {
1566       __ cset(result->as_register(), ncond);
1567       return;
1568     } else if (val1 == 1 && val2 == 0) {
1569       __ cset(result->as_register(), acond);
1570       return;
1571     }
1572   }
1573 
1574   if (opr1->is_constant() && opr2->is_constant()
1575       && opr1->type() == T_LONG && opr2->type() == T_LONG) {
1576     jlong val1 = opr1->as_jlong();
1577     jlong val2 = opr2->as_jlong();
1578     if (val1 == 0 && val2 == 1) {
1579       __ cset(result->as_register_lo(), ncond);
1580       return;
1581     } else if (val1 == 1 && val2 == 0) {
1582       __ cset(result->as_register_lo(), acond);
1583       return;
1584     }
1585   }
1586 
1587   if (opr1->is_stack()) {
1588     stack2reg(opr1, FrameMap::rscratch1_opr, result->type());
1589     opr1 = FrameMap::rscratch1_opr;
1590   } else if (opr1->is_constant()) {
1591     LIR_Opr tmp
1592       = opr1->type() == T_LONG ? FrameMap::rscratch1_long_opr : FrameMap::rscratch1_opr;
1593     const2reg(opr1, tmp, lir_patch_none, nullptr);
1594     opr1 = tmp;
1595   }
1596 
1597   if (opr2->is_stack()) {
1598     stack2reg(opr2, FrameMap::rscratch2_opr, result->type());
1599     opr2 = FrameMap::rscratch2_opr;
1600   } else if (opr2->is_constant()) {
1601     LIR_Opr tmp
1602       = opr2->type() == T_LONG ? FrameMap::rscratch2_long_opr : FrameMap::rscratch2_opr;
1603     const2reg(opr2, tmp, lir_patch_none, nullptr);
1604     opr2 = tmp;
1605   }
1606 
1607   if (result->type() == T_LONG)
1608     __ csel(result->as_register_lo(), opr1->as_register_lo(), opr2->as_register_lo(), acond);
1609   else
1610     __ csel(result->as_register(), opr1->as_register(), opr2->as_register(), acond);
1611 }
1612 
1613 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info) {
1614   assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
1615 
1616   if (left->is_single_cpu()) {
1617     Register lreg = left->as_register();
1618     Register dreg = as_reg(dest);
1619 
1620     if (right->is_single_cpu()) {
1621       // cpu register - cpu register
1622 
1623       assert(left->type() == T_INT && right->type() == T_INT && dest->type() == T_INT,
1624              "should be");
1625       Register rreg = right->as_register();
1626       switch (code) {
1627       case lir_add: __ addw (dest->as_register(), lreg, rreg); break;
1628       case lir_sub: __ subw (dest->as_register(), lreg, rreg); break;
1629       case lir_mul: __ mulw (dest->as_register(), lreg, rreg); break;
1630       default:      ShouldNotReachHere();
1631       }
1632 
1633     } else if (right->is_double_cpu()) {
1634       Register rreg = right->as_register_lo();
1635       // single_cpu + double_cpu: can happen with obj+long
1636       assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
1637       switch (code) {
1638       case lir_add: __ add(dreg, lreg, rreg); break;
1639       case lir_sub: __ sub(dreg, lreg, rreg); break;
1640       default: ShouldNotReachHere();
1641       }
1642     } else if (right->is_constant()) {
1643       // cpu register - constant
1644       jlong c;
1645 
1646       // FIXME.  This is fugly: we really need to factor all this logic.
1647       switch(right->type()) {
1648       case T_LONG:
1649         c = right->as_constant_ptr()->as_jlong();
1650         break;
1651       case T_INT:
1652       case T_ADDRESS:
1653         c = right->as_constant_ptr()->as_jint();
1654         break;
1655       default:
1656         ShouldNotReachHere();
1657         c = 0;  // unreachable
1658         break;
1659       }
1660 
1661       assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
1662       if (c == 0 && dreg == lreg) {
1663         COMMENT("effective nop elided");
1664         return;
1665       }
1666       switch(left->type()) {
1667       case T_INT:
1668         switch (code) {
1669         case lir_add: __ addw(dreg, lreg, c); break;
1670         case lir_sub: __ subw(dreg, lreg, c); break;
1671         default: ShouldNotReachHere();
1672         }
1673         break;
1674       case T_OBJECT:
1675       case T_ADDRESS:
1676         switch (code) {
1677         case lir_add: __ add(dreg, lreg, c); break;
1678         case lir_sub: __ sub(dreg, lreg, c); break;
1679         default: ShouldNotReachHere();
1680         }
1681         break;
1682       default:
1683         ShouldNotReachHere();
1684       }
1685     } else {
1686       ShouldNotReachHere();
1687     }
1688 
1689   } else if (left->is_double_cpu()) {
1690     Register lreg_lo = left->as_register_lo();
1691 
1692     if (right->is_double_cpu()) {
1693       // cpu register - cpu register
1694       Register rreg_lo = right->as_register_lo();
1695       switch (code) {
1696       case lir_add: __ add (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1697       case lir_sub: __ sub (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1698       case lir_mul: __ mul (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1699       case lir_div: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, false, rscratch1); break;
1700       case lir_rem: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, true, rscratch1); break;
1701       default:
1702         ShouldNotReachHere();
1703       }
1704 
1705     } else if (right->is_constant()) {
1706       jlong c = right->as_constant_ptr()->as_jlong();
1707       Register dreg = as_reg(dest);
1708       switch (code) {
1709         case lir_add:
1710         case lir_sub:
1711           if (c == 0 && dreg == lreg_lo) {
1712             COMMENT("effective nop elided");
1713             return;
1714           }
1715           code == lir_add ? __ add(dreg, lreg_lo, c) : __ sub(dreg, lreg_lo, c);
1716           break;
1717         case lir_div:
1718           assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
1719           if (c == 1) {
1720             // move lreg_lo to dreg if divisor is 1
1721             __ mov(dreg, lreg_lo);
1722           } else {
1723             unsigned int shift = log2i_exact(c);
1724             // use rscratch1 as intermediate result register
1725             __ asr(rscratch1, lreg_lo, 63);
1726             __ add(rscratch1, lreg_lo, rscratch1, Assembler::LSR, 64 - shift);
1727             __ asr(dreg, rscratch1, shift);
1728           }
1729           break;
1730         case lir_rem:
1731           assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
1732           if (c == 1) {
1733             // move 0 to dreg if divisor is 1
1734             __ mov(dreg, zr);
1735           } else {
1736             // use rscratch1 as intermediate result register
1737             __ negs(rscratch1, lreg_lo);
1738             __ andr(dreg, lreg_lo, c - 1);
1739             __ andr(rscratch1, rscratch1, c - 1);
1740             __ csneg(dreg, dreg, rscratch1, Assembler::MI);
1741           }
1742           break;
1743         default:
1744           ShouldNotReachHere();
1745       }
1746     } else {
1747       ShouldNotReachHere();
1748     }
1749   } else if (left->is_single_fpu()) {
1750     assert(right->is_single_fpu(), "right hand side of float arithmetics needs to be float register");
1751     switch (code) {
1752     case lir_add: __ fadds (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1753     case lir_sub: __ fsubs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1754     case lir_mul: __ fmuls (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1755     case lir_div: __ fdivs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1756     default:
1757       ShouldNotReachHere();
1758     }
1759   } else if (left->is_double_fpu()) {
1760     if (right->is_double_fpu()) {
1761       // fpu register - fpu register
1762       switch (code) {
1763       case lir_add: __ faddd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1764       case lir_sub: __ fsubd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1765       case lir_mul: __ fmuld (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1766       case lir_div: __ fdivd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1767       default:
1768         ShouldNotReachHere();
1769       }
1770     } else {
1771       if (right->is_constant()) {
1772         ShouldNotReachHere();
1773       }
1774       ShouldNotReachHere();
1775     }
1776   } else if (left->is_single_stack() || left->is_address()) {
1777     assert(left == dest, "left and dest must be equal");
1778     ShouldNotReachHere();
1779   } else {
1780     ShouldNotReachHere();
1781   }
1782 }
1783 
1784 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
1785   switch(code) {
1786   case lir_abs : __ fabsd(dest->as_double_reg(), value->as_double_reg()); break;
1787   case lir_sqrt: __ fsqrtd(dest->as_double_reg(), value->as_double_reg()); break;
1788   case lir_f2hf: __ flt_to_flt16(dest->as_register(), value->as_float_reg(), tmp->as_float_reg()); break;
1789   case lir_hf2f: __ flt16_to_flt(dest->as_float_reg(), value->as_register(), tmp->as_float_reg()); break;
1790   default      : ShouldNotReachHere();
1791   }
1792 }
1793 
1794 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1795 
1796   assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");
1797   Register Rleft = left->is_single_cpu() ? left->as_register() :
1798                                            left->as_register_lo();
1799    if (dst->is_single_cpu()) {
1800      Register Rdst = dst->as_register();
1801      if (right->is_constant()) {
1802        switch (code) {
1803          case lir_logic_and: __ andw (Rdst, Rleft, right->as_jint()); break;
1804          case lir_logic_or:  __ orrw (Rdst, Rleft, right->as_jint()); break;
1805          case lir_logic_xor: __ eorw (Rdst, Rleft, right->as_jint()); break;
1806          default: ShouldNotReachHere(); break;
1807        }
1808      } else {
1809        Register Rright = right->is_single_cpu() ? right->as_register() :
1810                                                   right->as_register_lo();
1811        switch (code) {
1812          case lir_logic_and: __ andw (Rdst, Rleft, Rright); break;
1813          case lir_logic_or:  __ orrw (Rdst, Rleft, Rright); break;
1814          case lir_logic_xor: __ eorw (Rdst, Rleft, Rright); break;
1815          default: ShouldNotReachHere(); break;
1816        }
1817      }
1818    } else {
1819      Register Rdst = dst->as_register_lo();
1820      if (right->is_constant()) {
1821        switch (code) {
1822          case lir_logic_and: __ andr (Rdst, Rleft, right->as_jlong()); break;
1823          case lir_logic_or:  __ orr (Rdst, Rleft, right->as_jlong()); break;
1824          case lir_logic_xor: __ eor (Rdst, Rleft, right->as_jlong()); break;
1825          default: ShouldNotReachHere(); break;
1826        }
1827      } else {
1828        Register Rright = right->is_single_cpu() ? right->as_register() :
1829                                                   right->as_register_lo();
1830        switch (code) {
1831          case lir_logic_and: __ andr (Rdst, Rleft, Rright); break;
1832          case lir_logic_or:  __ orr (Rdst, Rleft, Rright); break;
1833          case lir_logic_xor: __ eor (Rdst, Rleft, Rright); break;
1834          default: ShouldNotReachHere(); break;
1835        }
1836      }
1837    }
1838 }
1839 
1840 
1841 
1842 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr illegal, LIR_Opr result, CodeEmitInfo* info) {
1843 
1844   // opcode check
1845   assert((code == lir_idiv) || (code == lir_irem), "opcode must be idiv or irem");
1846   bool is_irem = (code == lir_irem);
1847 
1848   // operand check
1849   assert(left->is_single_cpu(),   "left must be register");
1850   assert(right->is_single_cpu() || right->is_constant(),  "right must be register or constant");
1851   assert(result->is_single_cpu(), "result must be register");
1852   Register lreg = left->as_register();
1853   Register dreg = result->as_register();
1854 
1855   // power-of-2 constant check and codegen
1856   if (right->is_constant()) {
1857     int c = right->as_constant_ptr()->as_jint();
1858     assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
1859     if (is_irem) {
1860       if (c == 1) {
1861         // move 0 to dreg if divisor is 1
1862         __ movw(dreg, zr);
1863       } else {
1864         // use rscratch1 as intermediate result register
1865         __ negsw(rscratch1, lreg);
1866         __ andw(dreg, lreg, c - 1);
1867         __ andw(rscratch1, rscratch1, c - 1);
1868         __ csnegw(dreg, dreg, rscratch1, Assembler::MI);
1869       }
1870     } else {
1871       if (c == 1) {
1872         // move lreg to dreg if divisor is 1
1873         __ movw(dreg, lreg);
1874       } else {
1875         unsigned int shift = exact_log2(c);
1876         // use rscratch1 as intermediate result register
1877         __ asrw(rscratch1, lreg, 31);
1878         __ addw(rscratch1, lreg, rscratch1, Assembler::LSR, 32 - shift);
1879         __ asrw(dreg, rscratch1, shift);
1880       }
1881     }
1882   } else {
1883     Register rreg = right->as_register();
1884     __ corrected_idivl(dreg, lreg, rreg, is_irem, rscratch1);
1885   }
1886 }
1887 
1888 
1889 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1890   if (opr1->is_constant() && opr2->is_single_cpu()) {
1891     // tableswitch
1892     Register reg = as_reg(opr2);
1893     struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
1894     __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
1895   } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
1896     Register reg1 = as_reg(opr1);
1897     if (opr2->is_single_cpu()) {
1898       // cpu register - cpu register
1899       Register reg2 = opr2->as_register();
1900       if (is_reference_type(opr1->type())) {
1901         __ cmpoop(reg1, reg2);
1902       } else {
1903         assert(!is_reference_type(opr2->type()), "cmp int, oop?");
1904         __ cmpw(reg1, reg2);
1905       }
1906       return;
1907     }
1908     if (opr2->is_double_cpu()) {
1909       // cpu register - cpu register
1910       Register reg2 = opr2->as_register_lo();
1911       __ cmp(reg1, reg2);
1912       return;
1913     }
1914 
1915     if (opr2->is_constant()) {
1916       bool is_32bit = false; // width of register operand
1917       jlong imm;
1918 
1919       switch(opr2->type()) {
1920       case T_INT:
1921         imm = opr2->as_constant_ptr()->as_jint();
1922         is_32bit = true;
1923         break;
1924       case T_LONG:
1925         imm = opr2->as_constant_ptr()->as_jlong();
1926         break;
1927       case T_ADDRESS:
1928         imm = opr2->as_constant_ptr()->as_jint();
1929         break;
1930       case T_METADATA:
1931         imm = (intptr_t)(opr2->as_constant_ptr()->as_metadata());
1932         break;
1933       case T_OBJECT:
1934       case T_ARRAY:
1935         jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
1936         __ cmpoop(reg1, rscratch1);
1937         return;
1938       default:
1939         ShouldNotReachHere();
1940         imm = 0;  // unreachable
1941         break;
1942       }
1943 
1944       if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
1945         if (is_32bit)
1946           __ cmpw(reg1, imm);
1947         else
1948           __ subs(zr, reg1, imm);
1949         return;
1950       } else {
1951         __ mov(rscratch1, imm);
1952         if (is_32bit)
1953           __ cmpw(reg1, rscratch1);
1954         else
1955           __ cmp(reg1, rscratch1);
1956         return;
1957       }
1958     } else
1959       ShouldNotReachHere();
1960   } else if (opr1->is_single_fpu()) {
1961     FloatRegister reg1 = opr1->as_float_reg();
1962     assert(opr2->is_single_fpu(), "expect single float register");
1963     FloatRegister reg2 = opr2->as_float_reg();
1964     __ fcmps(reg1, reg2);
1965   } else if (opr1->is_double_fpu()) {
1966     FloatRegister reg1 = opr1->as_double_reg();
1967     assert(opr2->is_double_fpu(), "expect double float register");
1968     FloatRegister reg2 = opr2->as_double_reg();
1969     __ fcmpd(reg1, reg2);
1970   } else {
1971     ShouldNotReachHere();
1972   }
1973 }
1974 
1975 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
1976   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
1977     bool is_unordered_less = (code == lir_ucmp_fd2i);
1978     if (left->is_single_fpu()) {
1979       __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
1980     } else if (left->is_double_fpu()) {
1981       __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
1982     } else {
1983       ShouldNotReachHere();
1984     }
1985   } else if (code == lir_cmp_l2i) {
1986     Label done;
1987     __ cmp(left->as_register_lo(), right->as_register_lo());
1988     __ mov(dst->as_register(), (uint64_t)-1L);
1989     __ br(Assembler::LT, done);
1990     __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
1991     __ bind(done);
1992   } else {
1993     ShouldNotReachHere();
1994   }
1995 }
1996 
1997 
1998 void LIR_Assembler::align_call(LIR_Code code) {  }
1999 
2000 
2001 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2002   address call = __ trampoline_call(Address(op->addr(), rtype));
2003   if (call == nullptr) {
2004     bailout("trampoline stub overflow");
2005     return;
2006   }
2007   add_call_info(code_offset(), op->info());
2008   __ post_call_nop();
2009 }
2010 
2011 
2012 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2013   address call = __ ic_call(op->addr());
2014   if (call == nullptr) {
2015     bailout("trampoline stub overflow");
2016     return;
2017   }
2018   add_call_info(code_offset(), op->info());
2019   __ post_call_nop();
2020 }
2021 
2022 void LIR_Assembler::emit_static_call_stub() {
2023   address call_pc = __ pc();
2024   address stub = __ start_a_stub(call_stub_size());
2025   if (stub == nullptr) {
2026     bailout("static call stub overflow");
2027     return;
2028   }
2029 
2030   int start = __ offset();
2031 
2032   __ relocate(static_stub_Relocation::spec(call_pc));
2033   __ emit_static_call_stub();
2034 
2035   assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
2036         <= call_stub_size(), "stub too big");
2037   __ end_a_stub();
2038 }
2039 
2040 
2041 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2042   assert(exceptionOop->as_register() == r0, "must match");
2043   assert(exceptionPC->as_register() == r3, "must match");
2044 
2045   // exception object is not added to oop map by LinearScan
2046   // (LinearScan assumes that no oops are in fixed registers)
2047   info->add_register_oop(exceptionOop);
2048   StubId unwind_id;
2049 
2050   // get current pc information
2051   // pc is only needed if the method has an exception handler, the unwind code does not need it.
2052   if (compilation()->debug_info_recorder()->last_pc_offset() == __ offset()) {
2053     // As no instructions have been generated yet for this LIR node it's
2054     // possible that an oop map already exists for the current offset.
2055     // In that case insert an dummy NOP here to ensure all oop map PCs
2056     // are unique. See JDK-8237483.
2057     __ nop();
2058   }
2059   int pc_for_athrow_offset = __ offset();
2060   InternalAddress pc_for_athrow(__ pc());
2061   __ adr(exceptionPC->as_register(), pc_for_athrow);
2062   add_call_info(pc_for_athrow_offset, info); // for exception handler
2063 
2064   __ verify_not_null_oop(r0);
2065   // search an exception handler (r0: exception oop, r3: throwing pc)
2066   if (compilation()->has_fpu_code()) {
2067     unwind_id = StubId::c1_handle_exception_id;
2068   } else {
2069     unwind_id = StubId::c1_handle_exception_nofpu_id;
2070   }
2071   __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
2072 
2073   // FIXME: enough room for two byte trap   ????
2074   __ nop();
2075 }
2076 
2077 
2078 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
2079   assert(exceptionOop->as_register() == r0, "must match");
2080 
2081   __ b(_unwind_handler_entry);
2082 }
2083 
2084 
2085 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2086   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2087   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2088 
2089   switch (left->type()) {
2090     case T_INT: {
2091       switch (code) {
2092       case lir_shl:  __ lslvw (dreg, lreg, count->as_register()); break;
2093       case lir_shr:  __ asrvw (dreg, lreg, count->as_register()); break;
2094       case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
2095       default:
2096         ShouldNotReachHere();
2097         break;
2098       }
2099       break;
2100     case T_LONG:
2101     case T_ADDRESS:
2102     case T_OBJECT:
2103       switch (code) {
2104       case lir_shl:  __ lslv (dreg, lreg, count->as_register()); break;
2105       case lir_shr:  __ asrv (dreg, lreg, count->as_register()); break;
2106       case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
2107       default:
2108         ShouldNotReachHere();
2109         break;
2110       }
2111       break;
2112     default:
2113       ShouldNotReachHere();
2114       break;
2115     }
2116   }
2117 }
2118 
2119 
2120 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2121   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2122   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2123 
2124   switch (left->type()) {
2125     case T_INT: {
2126       switch (code) {
2127       case lir_shl:  __ lslw (dreg, lreg, count); break;
2128       case lir_shr:  __ asrw (dreg, lreg, count); break;
2129       case lir_ushr: __ lsrw (dreg, lreg, count); break;
2130       default:
2131         ShouldNotReachHere();
2132         break;
2133       }
2134       break;
2135     case T_LONG:
2136     case T_ADDRESS:
2137     case T_OBJECT:
2138       switch (code) {
2139       case lir_shl:  __ lsl (dreg, lreg, count); break;
2140       case lir_shr:  __ asr (dreg, lreg, count); break;
2141       case lir_ushr: __ lsr (dreg, lreg, count); break;
2142       default:
2143         ShouldNotReachHere();
2144         break;
2145       }
2146       break;
2147     default:
2148       ShouldNotReachHere();
2149       break;
2150     }
2151   }
2152 }
2153 
2154 
2155 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2156   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2157   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2158   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2159   __ str (r, Address(sp, offset_from_rsp_in_bytes));
2160 }
2161 
2162 
2163 void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2164   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2165   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2166   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2167   __ mov (rscratch1, c);
2168   __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2169 }
2170 
2171 
2172 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
2173   ShouldNotReachHere();
2174   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2175   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2176   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2177   __ lea(rscratch1, __ constant_oop_address(o));
2178   __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2179 }
2180 












2181 
2182 // This code replaces a call to arraycopy; no exception may
2183 // be thrown in this code, they must be thrown in the System.arraycopy
2184 // activation frame; we could save some checks if this would not be the case
2185 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2186   ciArrayKlass* default_type = op->expected_type();
2187   Register src = op->src()->as_register();
2188   Register dst = op->dst()->as_register();
2189   Register src_pos = op->src_pos()->as_register();
2190   Register dst_pos = op->dst_pos()->as_register();
2191   Register length  = op->length()->as_register();
2192   Register tmp = op->tmp()->as_register();
2193 
2194   CodeStub* stub = op->stub();
2195   int flags = op->flags();
2196   BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
2197   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2198 






2199   // if we don't know anything, just go through the generic arraycopy
2200   if (default_type == nullptr // || basic_type == T_OBJECT
2201       ) {
2202     Label done;
2203     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2204 
2205     // Save the arguments in case the generic arraycopy fails and we
2206     // have to fall back to the JNI stub
2207     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2208     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2209     __ str(src,              Address(sp, 4*BytesPerWord));
2210 
2211     address copyfunc_addr = StubRoutines::generic_arraycopy();
2212     assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2213 
2214     // The arguments are in java calling convention so we shift them
2215     // to C convention
2216     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2217     __ mov(c_rarg0, j_rarg0);
2218     assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2219     __ mov(c_rarg1, j_rarg1);
2220     assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
2221     __ mov(c_rarg2, j_rarg2);
2222     assert_different_registers(c_rarg3, j_rarg4);
2223     __ mov(c_rarg3, j_rarg3);
2224     __ mov(c_rarg4, j_rarg4);
2225 #ifndef PRODUCT
2226     if (PrintC1Statistics) {
2227       __ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
2228     }
2229 #endif
2230     __ far_call(RuntimeAddress(copyfunc_addr));
2231 
2232     __ cbz(r0, *stub->continuation());
2233 
2234     // Reload values from the stack so they are where the stub
2235     // expects them.
2236     __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2237     __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2238     __ ldr(src,              Address(sp, 4*BytesPerWord));
2239 
2240     // r0 is -1^K where K == partial copied count
2241     __ eonw(rscratch1, r0, zr);
2242     // adjust length down and src/end pos up by partial copied count
2243     __ subw(length, length, rscratch1);
2244     __ addw(src_pos, src_pos, rscratch1);
2245     __ addw(dst_pos, dst_pos, rscratch1);
2246     __ b(*stub->entry());
2247 
2248     __ bind(*stub->continuation());
2249     return;
2250   }
2251 








2252   assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2253 
2254   int elem_size = type2aelembytes(basic_type);
2255   int scale = exact_log2(elem_size);
2256 
2257   Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2258   Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2259 
2260   // test for null
2261   if (flags & LIR_OpArrayCopy::src_null_check) {
2262     __ cbz(src, *stub->entry());
2263   }
2264   if (flags & LIR_OpArrayCopy::dst_null_check) {
2265     __ cbz(dst, *stub->entry());
2266   }
2267 
2268   // If the compiler was not able to prove that exact type of the source or the destination
2269   // of the arraycopy is an array type, check at runtime if the source or the destination is
2270   // an instance type.
2271   if (flags & LIR_OpArrayCopy::type_check) {
2272     if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::dst_objarray)) {
2273       __ load_klass(tmp, dst);
2274       __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2275       __ cmpw(rscratch1, Klass::_lh_neutral_value);
2276       __ br(Assembler::GE, *stub->entry());
2277     }
2278 
2279     if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::src_objarray)) {
2280       __ load_klass(tmp, src);
2281       __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2282       __ cmpw(rscratch1, Klass::_lh_neutral_value);
2283       __ br(Assembler::GE, *stub->entry());
2284     }
2285   }
2286 
2287   // check if negative
2288   if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
2289     __ cmpw(src_pos, 0);
2290     __ br(Assembler::LT, *stub->entry());
2291   }
2292   if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
2293     __ cmpw(dst_pos, 0);
2294     __ br(Assembler::LT, *stub->entry());
2295   }
2296 
2297   if (flags & LIR_OpArrayCopy::length_positive_check) {
2298     __ cmpw(length, 0);
2299     __ br(Assembler::LT, *stub->entry());
2300   }
2301 
2302   if (flags & LIR_OpArrayCopy::src_range_check) {
2303     __ addw(tmp, src_pos, length);
2304     __ ldrw(rscratch1, src_length_addr);
2305     __ cmpw(tmp, rscratch1);
2306     __ br(Assembler::HI, *stub->entry());
2307   }
2308   if (flags & LIR_OpArrayCopy::dst_range_check) {
2309     __ addw(tmp, dst_pos, length);
2310     __ ldrw(rscratch1, dst_length_addr);
2311     __ cmpw(tmp, rscratch1);
2312     __ br(Assembler::HI, *stub->entry());
2313   }
2314 
2315   if (flags & LIR_OpArrayCopy::type_check) {
2316     // We don't know the array types are compatible
2317     if (basic_type != T_OBJECT) {
2318       // Simple test for basic type arrays
2319       __ cmp_klasses_from_objects(src, dst, tmp, rscratch1);
2320       __ br(Assembler::NE, *stub->entry());
2321     } else {
2322       // For object arrays, if src is a sub class of dst then we can
2323       // safely do the copy.
2324       Label cont, slow;
2325 
2326 #define PUSH(r1, r2)                                    \
2327       stp(r1, r2, __ pre(sp, -2 * wordSize));
2328 
2329 #define POP(r1, r2)                                     \
2330       ldp(r1, r2, __ post(sp, 2 * wordSize));
2331 
2332       __ PUSH(src, dst);
2333 
2334       __ load_klass(src, src);
2335       __ load_klass(dst, dst);
2336 
2337       __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr);
2338 
2339       __ PUSH(src, dst);
2340       __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
2341       __ POP(src, dst);
2342 
2343       __ cbnz(src, cont);
2344 
2345       __ bind(slow);
2346       __ POP(src, dst);
2347 
2348       address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2349       if (copyfunc_addr != nullptr) { // use stub if available
2350         // src is not a sub class of dst so we have to do a
2351         // per-element check.
2352 
2353         int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2354         if ((flags & mask) != mask) {
2355           // Check that at least both of them object arrays.
2356           assert(flags & mask, "one of the two should be known to be an object array");
2357 
2358           if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2359             __ load_klass(tmp, src);
2360           } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2361             __ load_klass(tmp, dst);
2362           }
2363           int lh_offset = in_bytes(Klass::layout_helper_offset());
2364           Address klass_lh_addr(tmp, lh_offset);
2365           jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2366           __ ldrw(rscratch1, klass_lh_addr);
2367           __ mov(rscratch2, objArray_lh);
2368           __ eorw(rscratch1, rscratch1, rscratch2);
2369           __ cbnzw(rscratch1, *stub->entry());
2370         }
2371 
2372        // Spill because stubs can use any register they like and it's
2373        // easier to restore just those that we care about.
2374         __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2375         __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2376         __ str(src,              Address(sp, 4*BytesPerWord));
2377 
2378         __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2379         __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2380         assert_different_registers(c_rarg0, dst, dst_pos, length);
2381         __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2382         __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
2383         assert_different_registers(c_rarg1, dst, length);
2384         __ uxtw(c_rarg2, length);
2385         assert_different_registers(c_rarg2, dst);
2386 
2387         __ load_klass(c_rarg4, dst);
2388         __ ldr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
2389         __ ldrw(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
2390         __ far_call(RuntimeAddress(copyfunc_addr));
2391 
2392 #ifndef PRODUCT
2393         if (PrintC1Statistics) {
2394           Label failed;
2395           __ cbnz(r0, failed);
2396           __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
2397           __ bind(failed);
2398         }
2399 #endif
2400 
2401         __ cbz(r0, *stub->continuation());
2402 
2403 #ifndef PRODUCT
2404         if (PrintC1Statistics) {
2405           __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
2406         }
2407 #endif
2408         assert_different_registers(dst, dst_pos, length, src_pos, src, r0, rscratch1);
2409 
2410         // Restore previously spilled arguments
2411         __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2412         __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2413         __ ldr(src,              Address(sp, 4*BytesPerWord));
2414 
2415         // return value is -1^K where K is partial copied count
2416         __ eonw(rscratch1, r0, zr);
2417         // adjust length down and src/end pos up by partial copied count
2418         __ subw(length, length, rscratch1);
2419         __ addw(src_pos, src_pos, rscratch1);
2420         __ addw(dst_pos, dst_pos, rscratch1);
2421       }
2422 
2423       __ b(*stub->entry());
2424 
2425       __ bind(cont);
2426       __ POP(src, dst);
2427     }
2428   }
2429 
2430 #ifdef ASSERT
2431   if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2432     // Sanity check the known type with the incoming class.  For the
2433     // primitive case the types must match exactly with src.klass and
2434     // dst.klass each exactly matching the default type.  For the
2435     // object array case, if no type check is needed then either the
2436     // dst type is exactly the expected type and the src type is a
2437     // subtype which we can't check or src is the same array as dst
2438     // but not necessarily exactly of type default_type.
2439     Label known_ok, halt;
2440     __ mov_metadata(tmp, default_type->constant_encoding());
2441 
2442     if (basic_type != T_OBJECT) {
2443       __ cmp_klass(dst, tmp, rscratch1);
2444       __ br(Assembler::NE, halt);
2445       __ cmp_klass(src, tmp, rscratch1);
2446       __ br(Assembler::EQ, known_ok);
2447     } else {
2448       __ cmp_klass(dst, tmp, rscratch1);
2449       __ br(Assembler::EQ, known_ok);
2450       __ cmp(src, dst);
2451       __ br(Assembler::EQ, known_ok);
2452     }
2453     __ bind(halt);
2454     __ stop("incorrect type information in arraycopy");
2455     __ bind(known_ok);
2456   }
2457 #endif
2458 
2459 #ifndef PRODUCT
2460   if (PrintC1Statistics) {
2461     __ incrementw(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
2462   }
2463 #endif
2464 
2465   __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2466   __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2467   assert_different_registers(c_rarg0, dst, dst_pos, length);
2468   __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2469   __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
2470   assert_different_registers(c_rarg1, dst, length);
2471   __ uxtw(c_rarg2, length);
2472   assert_different_registers(c_rarg2, dst);
2473 
2474   bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2475   bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
2476   const char *name;
2477   address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2478 
2479  CodeBlob *cb = CodeCache::find_blob(entry);
2480  if (cb) {
2481    __ far_call(RuntimeAddress(entry));
2482  } else {
2483    __ call_VM_leaf(entry, 3);
2484  }
2485 
2486   if (stub != nullptr) {
2487     __ bind(*stub->continuation());
2488   }
2489 }
2490 
2491 
2492 
2493 
2494 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2495   Register obj = op->obj_opr()->as_register();  // may not be an oop
2496   Register hdr = op->hdr_opr()->as_register();
2497   Register lock = op->lock_opr()->as_register();
2498   Register temp = op->scratch_opr()->as_register();
2499   if (op->code() == lir_lock) {
2500     // add debug info for NullPointerException only if one is possible
2501     int null_check_offset = __ lock_object(hdr, obj, lock, temp, *op->stub()->entry());
2502     if (op->info() != nullptr) {
2503       add_debug_info_for_null_check(null_check_offset, op->info());
2504     }
2505     // done
2506   } else if (op->code() == lir_unlock) {
2507     __ unlock_object(hdr, obj, lock, temp, *op->stub()->entry());
2508   } else {
2509     Unimplemented();
2510   }
2511   __ bind(*op->stub()->continuation());
2512 }
2513 
2514 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
2515   Register obj = op->obj()->as_pointer_register();
2516   Register result = op->result_opr()->as_pointer_register();
2517 
2518   CodeEmitInfo* info = op->info();
2519   if (info != nullptr) {
2520     add_debug_info_for_null_check_here(info);
2521   }
2522 
2523   __ load_klass(result, obj);
2524 }
2525 
2526 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2527   ciMethod* method = op->profiled_method();
2528   int bci          = op->profiled_bci();
2529   ciMethod* callee = op->profiled_callee();
2530 
2531   // Update counter for all call types
2532   ciMethodData* md = method->method_data_or_null();
2533   assert(md != nullptr, "Sanity");
2534   ciProfileData* data = md->bci_to_data(bci);
2535   assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
2536   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
2537   Register mdo  = op->mdo()->as_register();
2538   __ mov_metadata(mdo, md->constant_encoding());
2539   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
2540   // Perform additional virtual call profiling for invokevirtual and
2541   // invokeinterface bytecodes
2542   if (op->should_profile_receiver_type()) {
2543     assert(op->recv()->is_single_cpu(), "recv must be allocated");
2544     Register recv = op->recv()->as_register();
2545     assert_different_registers(mdo, recv);
2546     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
2547     ciKlass* known_klass = op->known_holder();
2548     if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
2549       // We know the type that will be seen at this call site; we can
2550       // statically update the MethodData* rather than needing to do
2551       // dynamic tests on the receiver type.
2552       ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
2553       for (uint i = 0; i < VirtualCallData::row_limit(); i++) {
2554         ciKlass* receiver = vc_data->receiver(i);
2555         if (known_klass->equals(receiver)) {
2556           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
2557           __ addptr(data_addr, DataLayout::counter_increment);
2558           return;
2559         }
2560       }
2561       // Receiver type is not found in profile data.
2562       // Fall back to runtime helper to handle the rest at runtime.
2563       __ mov_metadata(recv, known_klass->constant_encoding());
2564     } else {
2565       __ load_klass(recv, recv);
2566     }
2567     type_profile_helper(mdo, md, data, recv);
2568   } else {
2569     // Static call
2570     __ addptr(counter_addr, DataLayout::counter_increment);
2571   }
2572 }
2573 
2574 
2575 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
2576   __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
2577 }
2578 
2579 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
2580   assert(op->crc()->is_single_cpu(),  "crc must be register");
2581   assert(op->val()->is_single_cpu(),  "byte value must be register");
2582   assert(op->result_opr()->is_single_cpu(), "result must be register");
2583   Register crc = op->crc()->as_register();
2584   Register val = op->val()->as_register();
2585   Register res = op->result_opr()->as_register();
2586 
2587   assert_different_registers(val, crc, res);
2588   uint64_t offset;
2589   __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);
2590   __ add(res, res, offset);
2591 
2592   __ mvnw(crc, crc); // ~crc
2593   __ update_byte_crc32(crc, val, res);
2594   __ mvnw(res, crc); // ~crc
2595 }
2596 
2597 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
2598   COMMENT("emit_profile_type {");
2599   Register obj = op->obj()->as_register();
2600   Register tmp = op->tmp()->as_pointer_register();
2601   Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
2602   ciKlass* exact_klass = op->exact_klass();
2603   intptr_t current_klass = op->current_klass();
2604   bool not_null = op->not_null();
2605   bool no_conflict = op->no_conflict();
2606 
2607   Label update, next, none;
2608 
2609   bool do_null = !not_null;
2610   bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
2611   bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
2612 
2613   assert(do_null || do_update, "why are we here?");
2614   assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
2615   assert(mdo_addr.base() != rscratch1, "wrong register");
2616 
2617   __ verify_oop(obj);
2618 
2619   if (tmp != obj) {
2620     assert_different_registers(obj, tmp, rscratch1, rscratch2, mdo_addr.base(), mdo_addr.index());
2621     __ mov(tmp, obj);
2622   } else {
2623     assert_different_registers(obj, rscratch1, rscratch2, mdo_addr.base(), mdo_addr.index());
2624   }
2625   if (do_null) {
2626     __ cbnz(tmp, update);
2627     if (!TypeEntries::was_null_seen(current_klass)) {
2628       __ ldr(rscratch2, mdo_addr);
2629       __ orr(rscratch2, rscratch2, TypeEntries::null_seen);
2630       __ str(rscratch2, mdo_addr);
2631     }
2632     if (do_update) {
2633 #ifndef ASSERT
2634       __ b(next);
2635     }
2636 #else
2637       __ b(next);
2638     }
2639   } else {
2640     __ cbnz(tmp, update);
2641     __ stop("unexpected null obj");
2642 #endif
2643   }
2644 
2645   __ bind(update);
2646 
2647   if (do_update) {
2648 #ifdef ASSERT
2649     if (exact_klass != nullptr) {
2650       Label ok;
2651       __ load_klass(tmp, tmp);
2652       __ mov_metadata(rscratch1, exact_klass->constant_encoding());
2653       __ eor(rscratch1, tmp, rscratch1);
2654       __ cbz(rscratch1, ok);
2655       __ stop("exact klass and actual klass differ");
2656       __ bind(ok);
2657     }
2658 #endif
2659     if (!no_conflict) {
2660       if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
2661         if (exact_klass != nullptr) {
2662           __ mov_metadata(tmp, exact_klass->constant_encoding());
2663         } else {
2664           __ load_klass(tmp, tmp);
2665         }
2666 
2667         __ ldr(rscratch2, mdo_addr);
2668         __ eor(tmp, tmp, rscratch2);
2669         __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2670         // klass seen before, nothing to do. The unknown bit may have been
2671         // set already but no need to check.
2672         __ cbz(rscratch1, next);
2673 
2674         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2675 
2676         if (TypeEntries::is_type_none(current_klass)) {
2677           __ cbz(rscratch2, none);
2678           __ cmp(rscratch2, (u1)TypeEntries::null_seen);
2679           __ br(Assembler::EQ, none);
2680           // There is a chance that the checks above
2681           // fail if another thread has just set the
2682           // profiling to this obj's klass
2683           __ dmb(Assembler::ISHLD);
2684           __ eor(tmp, tmp, rscratch2); // get back original value before XOR
2685           __ ldr(rscratch2, mdo_addr);
2686           __ eor(tmp, tmp, rscratch2);
2687           __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2688           __ cbz(rscratch1, next);
2689         }
2690       } else {
2691         assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2692                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
2693 
2694         __ ldr(tmp, mdo_addr);
2695         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2696       }
2697 
2698       // different than before. Cannot keep accurate profile.
2699       __ ldr(rscratch2, mdo_addr);
2700       __ orr(rscratch2, rscratch2, TypeEntries::type_unknown);
2701       __ str(rscratch2, mdo_addr);
2702 
2703       if (TypeEntries::is_type_none(current_klass)) {
2704         __ b(next);
2705 
2706         __ bind(none);
2707         // first time here. Set profile type.
2708         __ str(tmp, mdo_addr);
2709 #ifdef ASSERT
2710         __ andr(tmp, tmp, TypeEntries::type_mask);
2711         __ verify_klass_ptr(tmp);
2712 #endif
2713       }
2714     } else {
2715       // There's a single possible klass at this profile point
2716       assert(exact_klass != nullptr, "should be");
2717       if (TypeEntries::is_type_none(current_klass)) {
2718         __ mov_metadata(tmp, exact_klass->constant_encoding());
2719         __ ldr(rscratch2, mdo_addr);
2720         __ eor(tmp, tmp, rscratch2);
2721         __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2722         __ cbz(rscratch1, next);
2723 #ifdef ASSERT
2724         {
2725           Label ok;
2726           __ ldr(rscratch1, mdo_addr);
2727           __ cbz(rscratch1, ok);
2728           __ cmp(rscratch1, (u1)TypeEntries::null_seen);
2729           __ br(Assembler::EQ, ok);
2730           // may have been set by another thread
2731           __ dmb(Assembler::ISHLD);
2732           __ mov_metadata(rscratch1, exact_klass->constant_encoding());
2733           __ ldr(rscratch2, mdo_addr);
2734           __ eor(rscratch2, rscratch1, rscratch2);
2735           __ andr(rscratch2, rscratch2, TypeEntries::type_mask);
2736           __ cbz(rscratch2, ok);
2737 
2738           __ stop("unexpected profiling mismatch");
2739           __ bind(ok);
2740         }
2741 #endif
2742         // first time here. Set profile type.
2743         __ str(tmp, mdo_addr);
2744 #ifdef ASSERT
2745         __ andr(tmp, tmp, TypeEntries::type_mask);
2746         __ verify_klass_ptr(tmp);
2747 #endif
2748       } else {
2749         assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2750                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2751 
2752         __ ldr(tmp, mdo_addr);
2753         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2754 
2755         __ orr(tmp, tmp, TypeEntries::type_unknown);
2756         __ str(tmp, mdo_addr);
2757         // FIXME: Write barrier needed here?
2758       }
2759     }
2760 
2761     __ bind(next);
2762   }
2763   COMMENT("} emit_profile_type");
2764 }
2765 




















2766 
2767 void LIR_Assembler::align_backward_branch_target() {
2768 }
2769 
2770 
2771 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2772   // tmp must be unused
2773   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2774 
2775   if (left->is_single_cpu()) {
2776     assert(dest->is_single_cpu(), "expect single result reg");
2777     __ negw(dest->as_register(), left->as_register());
2778   } else if (left->is_double_cpu()) {
2779     assert(dest->is_double_cpu(), "expect double result reg");
2780     __ neg(dest->as_register_lo(), left->as_register_lo());
2781   } else if (left->is_single_fpu()) {
2782     assert(dest->is_single_fpu(), "expect single float result reg");
2783     __ fnegs(dest->as_float_reg(), left->as_float_reg());
2784   } else {
2785     assert(left->is_double_fpu(), "expect double float operand reg");
2786     assert(dest->is_double_fpu(), "expect double float result reg");
2787     __ fnegd(dest->as_double_reg(), left->as_double_reg());
2788   }
2789 }
2790 
2791 
2792 void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
2793   if (patch_code != lir_patch_none) {
2794     deoptimize_trap(info);
2795     return;
2796   }
2797 
2798   __ lea(dest->as_pointer_register(), as_Address(addr->as_address_ptr()));
2799 }
2800 
2801 
2802 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
2803   assert(!tmp->is_valid(), "don't need temporary");
2804 
2805   CodeBlob *cb = CodeCache::find_blob(dest);
2806   if (cb) {
2807     __ far_call(RuntimeAddress(dest));
2808   } else {
2809     __ mov(rscratch1, RuntimeAddress(dest));
2810     __ blr(rscratch1);
2811   }
2812 
2813   if (info != nullptr) {
2814     add_call_info_here(info);
2815   }
2816   __ post_call_nop();
2817 }
2818 
2819 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
2820   if (src->is_address()) {
2821     mem2reg(src, dest, type, lir_patch_none, info, /*wide*/false, /*is_volatile*/true);
2822   } else if (dest->is_address()) {
2823     move_op(src, dest, type, lir_patch_none, info, /*wide*/false);
2824   } else {
2825     ShouldNotReachHere();
2826   }
2827 }
2828 
2829 #ifdef ASSERT
2830 // emit run-time assertion
2831 void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
2832   assert(op->code() == lir_assert, "must be");
2833 
2834   if (op->in_opr1()->is_valid()) {
2835     assert(op->in_opr2()->is_valid(), "both operands must be valid");
2836     comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
2837   } else {
2838     assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
2839     assert(op->condition() == lir_cond_always, "no other conditions allowed");
2840   }
2841 
2842   Label ok;
2843   if (op->condition() != lir_cond_always) {
2844     Assembler::Condition acond = Assembler::AL;
2845     switch (op->condition()) {
2846       case lir_cond_equal:        acond = Assembler::EQ;  break;
2847       case lir_cond_notEqual:     acond = Assembler::NE;  break;
2848       case lir_cond_less:         acond = Assembler::LT;  break;
2849       case lir_cond_lessEqual:    acond = Assembler::LE;  break;
2850       case lir_cond_greaterEqual: acond = Assembler::GE;  break;
2851       case lir_cond_greater:      acond = Assembler::GT;  break;
2852       case lir_cond_belowEqual:   acond = Assembler::LS;  break;
2853       case lir_cond_aboveEqual:   acond = Assembler::HS;  break;
2854       default:                    ShouldNotReachHere();
2855     }
2856     __ br(acond, ok);
2857   }
2858   if (op->halt()) {
2859     const char* str = __ code_string(op->msg());
2860     __ stop(str);
2861   } else {
2862     breakpoint();
2863   }
2864   __ bind(ok);
2865 }
2866 #endif
2867 
2868 #ifndef PRODUCT
2869 #define COMMENT(x)   do { __ block_comment(x); } while (0)
2870 #else
2871 #define COMMENT(x)
2872 #endif
2873 
2874 void LIR_Assembler::membar() {
2875   COMMENT("membar");
2876   __ membar(MacroAssembler::AnyAny);
2877 }
2878 
2879 void LIR_Assembler::membar_acquire() {
2880   __ membar(Assembler::LoadLoad|Assembler::LoadStore);
2881 }
2882 
2883 void LIR_Assembler::membar_release() {
2884   __ membar(Assembler::LoadStore|Assembler::StoreStore);
2885 }
2886 
2887 void LIR_Assembler::membar_loadload() {
2888   __ membar(Assembler::LoadLoad);
2889 }
2890 
2891 void LIR_Assembler::membar_storestore() {
2892   __ membar(MacroAssembler::StoreStore);
2893 }
2894 
2895 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
2896 
2897 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
2898 
2899 void LIR_Assembler::on_spin_wait() {
2900   __ spin_wait();
2901 }
2902 
2903 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
2904   __ mov(result_reg->as_register(), rthread);
2905 }
2906 




2907 
2908 void LIR_Assembler::peephole(LIR_List *lir) {
2909 #if 0
2910   if (tableswitch_count >= max_tableswitches)
2911     return;
2912 
2913   /*
2914     This finite-state automaton recognizes sequences of compare-and-
2915     branch instructions.  We will turn them into a tableswitch.  You
2916     could argue that C1 really shouldn't be doing this sort of
2917     optimization, but without it the code is really horrible.
2918   */
2919 
2920   enum { start_s, cmp1_s, beq_s, cmp_s } state;
2921   int first_key, last_key = -2147483648;
2922   int next_key = 0;
2923   int start_insn = -1;
2924   int last_insn = -1;
2925   Register reg = noreg;
2926   LIR_Opr reg_opr;
2927   state = start_s;
2928 
2929   LIR_OpList* inst = lir->instructions_list();
2930   for (int i = 0; i < inst->length(); i++) {
2931     LIR_Op* op = inst->at(i);
2932     switch (state) {
2933     case start_s:
2934       first_key = -1;
2935       start_insn = i;
2936       switch (op->code()) {
2937       case lir_cmp:
2938         LIR_Opr opr1 = op->as_Op2()->in_opr1();
2939         LIR_Opr opr2 = op->as_Op2()->in_opr2();
2940         if (opr1->is_cpu_register() && opr1->is_single_cpu()
2941             && opr2->is_constant()
2942             && opr2->type() == T_INT) {
2943           reg_opr = opr1;
2944           reg = opr1->as_register();
2945           first_key = opr2->as_constant_ptr()->as_jint();
2946           next_key = first_key + 1;
2947           state = cmp_s;
2948           goto next_state;
2949         }
2950         break;
2951       }
2952       break;
2953     case cmp_s:
2954       switch (op->code()) {
2955       case lir_branch:
2956         if (op->as_OpBranch()->cond() == lir_cond_equal) {
2957           state = beq_s;
2958           last_insn = i;
2959           goto next_state;
2960         }
2961       }
2962       state = start_s;
2963       break;
2964     case beq_s:
2965       switch (op->code()) {
2966       case lir_cmp: {
2967         LIR_Opr opr1 = op->as_Op2()->in_opr1();
2968         LIR_Opr opr2 = op->as_Op2()->in_opr2();
2969         if (opr1->is_cpu_register() && opr1->is_single_cpu()
2970             && opr1->as_register() == reg
2971             && opr2->is_constant()
2972             && opr2->type() == T_INT
2973             && opr2->as_constant_ptr()->as_jint() == next_key) {
2974           last_key = next_key;
2975           next_key++;
2976           state = cmp_s;
2977           goto next_state;
2978         }
2979       }
2980       }
2981       last_key = next_key;
2982       state = start_s;
2983       break;
2984     default:
2985       assert(false, "impossible state");
2986     }
2987     if (state == start_s) {
2988       if (first_key < last_key - 5L && reg != noreg) {
2989         {
2990           // printf("found run register %d starting at insn %d low value %d high value %d\n",
2991           //        reg->encoding(),
2992           //        start_insn, first_key, last_key);
2993           //   for (int i = 0; i < inst->length(); i++) {
2994           //     inst->at(i)->print();
2995           //     tty->print("\n");
2996           //   }
2997           //   tty->print("\n");
2998         }
2999 
3000         struct tableswitch *sw = &switches[tableswitch_count];
3001         sw->_insn_index = start_insn, sw->_first_key = first_key,
3002           sw->_last_key = last_key, sw->_reg = reg;
3003         inst->insert_before(last_insn + 1, new LIR_OpLabel(&sw->_after));
3004         {
3005           // Insert the new table of branches
3006           int offset = last_insn;
3007           for (int n = first_key; n < last_key; n++) {
3008             inst->insert_before
3009               (last_insn + 1,
3010                new LIR_OpBranch(lir_cond_always, T_ILLEGAL,
3011                                 inst->at(offset)->as_OpBranch()->label()));
3012             offset -= 2, i++;
3013           }
3014         }
3015         // Delete all the old compare-and-branch instructions
3016         for (int n = first_key; n < last_key; n++) {
3017           inst->remove_at(start_insn);
3018           inst->remove_at(start_insn);
3019         }
3020         // Insert the tableswitch instruction
3021         inst->insert_before(start_insn,
3022                             new LIR_Op2(lir_cmp, lir_cond_always,
3023                                         LIR_OprFact::intConst(tableswitch_count),
3024                                         reg_opr));
3025         inst->insert_before(start_insn + 1, new LIR_OpLabel(&sw->_branches));
3026         tableswitch_count++;
3027       }
3028       reg = noreg;
3029       last_key = -2147483648;
3030     }
3031   next_state:
3032     ;
3033   }
3034 #endif
3035 }
3036 
3037 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
3038   Address addr = as_Address(src->as_address_ptr());
3039   BasicType type = src->type();
3040   bool is_oop = is_reference_type(type);
3041 
3042   void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
3043   void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);
3044 
3045   switch(type) {
3046   case T_INT:
3047     xchg = &MacroAssembler::atomic_xchgalw;
3048     add = &MacroAssembler::atomic_addalw;
3049     break;
3050   case T_LONG:
3051     xchg = &MacroAssembler::atomic_xchgal;
3052     add = &MacroAssembler::atomic_addal;
3053     break;
3054   case T_OBJECT:
3055   case T_ARRAY:
3056     if (UseCompressedOops) {
3057       xchg = &MacroAssembler::atomic_xchgalw;
3058       add = &MacroAssembler::atomic_addalw;
3059     } else {
3060       xchg = &MacroAssembler::atomic_xchgal;
3061       add = &MacroAssembler::atomic_addal;
3062     }
3063     break;
3064   default:
3065     ShouldNotReachHere();
3066     xchg = &MacroAssembler::atomic_xchgal;
3067     add = &MacroAssembler::atomic_addal; // unreachable
3068   }
3069 
3070   switch (code) {
3071   case lir_xadd:
3072     {
3073       RegisterOrConstant inc;
3074       Register tmp = as_reg(tmp_op);
3075       Register dst = as_reg(dest);
3076       if (data->is_constant()) {
3077         inc = RegisterOrConstant(as_long(data));
3078         assert_different_registers(dst, addr.base(), tmp,
3079                                    rscratch1, rscratch2);
3080       } else {
3081         inc = RegisterOrConstant(as_reg(data));
3082         assert_different_registers(inc.as_register(), dst, addr.base(), tmp,
3083                                    rscratch1, rscratch2);
3084       }
3085       __ lea(tmp, addr);
3086       (_masm->*add)(dst, inc, tmp);
3087       break;
3088     }
3089   case lir_xchg:
3090     {
3091       Register tmp = tmp_op->as_register();
3092       Register obj = as_reg(data);
3093       Register dst = as_reg(dest);
3094       if (is_oop && UseCompressedOops) {
3095         __ encode_heap_oop(rscratch2, obj);
3096         obj = rscratch2;
3097       }
3098       assert_different_registers(obj, addr.base(), tmp, rscratch1);
3099       assert_different_registers(dst, addr.base(), tmp, rscratch1);
3100       __ lea(tmp, addr);
3101       (_masm->*xchg)(dst, obj, tmp);
3102       if (is_oop && UseCompressedOops) {
3103         __ decode_heap_oop(dst);
3104       }
3105     }
3106     break;
3107   default:
3108     ShouldNotReachHere();
3109   }
3110   if(!UseLSE) {
3111     __ membar(__ AnyAny);
3112   }
3113 }
3114 
3115 #undef __
--- EOF ---