1 /*
   2  * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "asm/assembler.hpp"
  29 #include "c1/c1_CodeStubs.hpp"
  30 #include "c1/c1_Compilation.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_MacroAssembler.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArrayKlass.hpp"
  36 #include "ci/ciInstance.hpp"
  37 #include "gc/shared/barrierSet.hpp"
  38 #include "gc/shared/cardTableBarrierSet.hpp"
  39 #include "gc/shared/collectedHeap.hpp"
  40 #include "nativeInst_aarch64.hpp"
  41 #include "oops/objArrayKlass.hpp"
  42 #include "runtime/frame.inline.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "vmreg_aarch64.inline.hpp"
  45 
  46 
  47 
  48 #ifndef PRODUCT
  49 #define COMMENT(x)   do { __ block_comment(x); } while (0)
  50 #else
  51 #define COMMENT(x)
  52 #endif
  53 
  54 NEEDS_CLEANUP // remove these definitions?
  55 const Register IC_Klass    = rscratch2;   // where the IC klass is cached
  56 const Register SYNC_header = r0;   // synchronization header
  57 const Register SHIFT_count = r0;   // where count for shift operations must be
  58 
  59 #define __ _masm->
  60 
  61 
  62 static void select_different_registers(Register preserve,
  63                                        Register extra,
  64                                        Register &tmp1,
  65                                        Register &tmp2) {
  66   if (tmp1 == preserve) {
  67     assert_different_registers(tmp1, tmp2, extra);
  68     tmp1 = extra;
  69   } else if (tmp2 == preserve) {
  70     assert_different_registers(tmp1, tmp2, extra);
  71     tmp2 = extra;
  72   }
  73   assert_different_registers(preserve, tmp1, tmp2);
  74 }
  75 
  76 
  77 
  78 static void select_different_registers(Register preserve,
  79                                        Register extra,
  80                                        Register &tmp1,
  81                                        Register &tmp2,
  82                                        Register &tmp3) {
  83   if (tmp1 == preserve) {
  84     assert_different_registers(tmp1, tmp2, tmp3, extra);
  85     tmp1 = extra;
  86   } else if (tmp2 == preserve) {
  87     assert_different_registers(tmp1, tmp2, tmp3, extra);
  88     tmp2 = extra;
  89   } else if (tmp3 == preserve) {
  90     assert_different_registers(tmp1, tmp2, tmp3, extra);
  91     tmp3 = extra;
  92   }
  93   assert_different_registers(preserve, tmp1, tmp2, tmp3);
  94 }
  95 
  96 
  97 bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }
  98 
  99 
 100 LIR_Opr LIR_Assembler::receiverOpr() {
 101   return FrameMap::receiver_opr;
 102 }
 103 
 104 LIR_Opr LIR_Assembler::osrBufferPointer() {
 105   return FrameMap::as_pointer_opr(receiverOpr()->as_register());
 106 }
 107 
 108 //--------------fpu register translations-----------------------
 109 
 110 
 111 address LIR_Assembler::float_constant(float f) {
 112   address const_addr = __ float_constant(f);
 113   if (const_addr == NULL) {
 114     bailout("const section overflow");
 115     return __ code()->consts()->start();
 116   } else {
 117     return const_addr;
 118   }
 119 }
 120 
 121 
 122 address LIR_Assembler::double_constant(double d) {
 123   address const_addr = __ double_constant(d);
 124   if (const_addr == NULL) {
 125     bailout("const section overflow");
 126     return __ code()->consts()->start();
 127   } else {
 128     return const_addr;
 129   }
 130 }
 131 
 132 address LIR_Assembler::int_constant(jlong n) {
 133   address const_addr = __ long_constant(n);
 134   if (const_addr == NULL) {
 135     bailout("const section overflow");
 136     return __ code()->consts()->start();
 137   } else {
 138     return const_addr;
 139   }
 140 }
 141 
 142 void LIR_Assembler::set_24bit_FPU() { Unimplemented(); }
 143 
 144 void LIR_Assembler::reset_FPU() { Unimplemented(); }
 145 
 146 void LIR_Assembler::fpop() { Unimplemented(); }
 147 
 148 void LIR_Assembler::fxch(int i) { Unimplemented(); }
 149 
 150 void LIR_Assembler::fld(int i) { Unimplemented(); }
 151 
 152 void LIR_Assembler::ffree(int i) { Unimplemented(); }
 153 
 154 void LIR_Assembler::breakpoint() { Unimplemented(); }
 155 
 156 void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }
 157 
 158 void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }
 159 
 160 bool LIR_Assembler::is_literal_address(LIR_Address* addr) { Unimplemented(); return false; }
 161 //-------------------------------------------
 162 
 163 static Register as_reg(LIR_Opr op) {
 164   return op->is_double_cpu() ? op->as_register_lo() : op->as_register();
 165 }
 166 
 167 static jlong as_long(LIR_Opr data) {
 168   jlong result;
 169   switch (data->type()) {
 170   case T_INT:
 171     result = (data->as_jint());
 172     break;
 173   case T_LONG:
 174     result = (data->as_jlong());
 175     break;
 176   default:
 177     ShouldNotReachHere();
 178     result = 0;  // unreachable
 179   }
 180   return result;
 181 }
 182 
 183 Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
 184   Register base = addr->base()->as_pointer_register();
 185   LIR_Opr opr = addr->index();
 186   if (opr->is_cpu_register()) {
 187     Register index;
 188     if (opr->is_single_cpu())
 189       index = opr->as_register();
 190     else
 191       index = opr->as_register_lo();
 192     assert(addr->disp() == 0, "must be");
 193     switch(opr->type()) {
 194       case T_INT:
 195         return Address(base, index, Address::sxtw(addr->scale()));
 196       case T_LONG:
 197         return Address(base, index, Address::lsl(addr->scale()));
 198       default:
 199         ShouldNotReachHere();
 200       }
 201   } else  {
 202     intptr_t addr_offset = intptr_t(addr->disp());
 203     if (Address::offset_ok_for_immed(addr_offset, addr->scale()))
 204       return Address(base, addr_offset, Address::lsl(addr->scale()));
 205     else {
 206       __ mov(tmp, addr_offset);
 207       return Address(base, tmp, Address::lsl(addr->scale()));
 208     }
 209   }
 210   return Address();
 211 }
 212 
 213 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
 214   ShouldNotReachHere();
 215   return Address();
 216 }
 217 
 218 Address LIR_Assembler::as_Address(LIR_Address* addr) {
 219   return as_Address(addr, rscratch1);
 220 }
 221 
 222 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
 223   return as_Address(addr, rscratch1);  // Ouch
 224   // FIXME: This needs to be much more clever.  See x86.
 225 }
 226 
 227 
 228 void LIR_Assembler::osr_entry() {
 229   offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
 230   BlockBegin* osr_entry = compilation()->hir()->osr_entry();
 231   ValueStack* entry_state = osr_entry->state();
 232   int number_of_locks = entry_state->locks_size();
 233 
 234   // we jump here if osr happens with the interpreter
 235   // state set up to continue at the beginning of the
 236   // loop that triggered osr - in particular, we have
 237   // the following registers set up:
 238   //
 239   // r2: osr buffer
 240   //
 241 
 242   // build frame
 243   ciMethod* m = compilation()->method();
 244   __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
 245 
 246   // OSR buffer is
 247   //
 248   // locals[nlocals-1..0]
 249   // monitors[0..number_of_locks]
 250   //
 251   // locals is a direct copy of the interpreter frame, so in the osr buffer
 252   // the first slot in the local array is the last local from the interpreter
 253   // and the last slot is local[0] (the receiver) from the interpreter
 254   //
 255   // Similarly with locks. The first lock slot in the osr buffer is the nth lock
 256   // from the interpreter frame, and the nth lock slot in the osr buffer is the
 257   // 0th lock in the interpreter frame (the method lock if a synchronized method)
 258 
 259   // Initialize monitors in the compiled activation.
 260   //   r2: pointer to osr buffer
 261   //
 262   // All other registers are dead at this point and the locals will be
 263   // copied into place by code emitted in the IR.
 264 
 265   Register OSR_buf = osrBufferPointer()->as_pointer_register();
 266   { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
 267     int monitor_offset = BytesPerWord * method()->max_locals() +
 268       (2 * BytesPerWord) * (number_of_locks - 1);
 269     // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
 270     // the OSR buffer using 2 word entries: first the lock and then
 271     // the oop.
 272     for (int i = 0; i < number_of_locks; i++) {
 273       int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
 274 #ifdef ASSERT
 275       // verify the interpreter's monitor has a non-null object
 276       {
 277         Label L;
 278         __ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord));
 279         __ cbnz(rscratch1, L);
 280         __ stop("locked object is NULL");
 281         __ bind(L);
 282       }
 283 #endif
 284       __ ldr(r19, Address(OSR_buf, slot_offset + 0));
 285       __ str(r19, frame_map()->address_for_monitor_lock(i));
 286       __ ldr(r19, Address(OSR_buf, slot_offset + 1*BytesPerWord));
 287       __ str(r19, frame_map()->address_for_monitor_object(i));
 288     }
 289   }
 290 }
 291 
 292 
 293 // inline cache check; done before the frame is built.
 294 int LIR_Assembler::check_icache() {
 295   Register receiver = FrameMap::receiver_opr->as_register();
 296   Register ic_klass = IC_Klass;
 297   int start_offset = __ offset();
 298   __ inline_cache_check(receiver, ic_klass);
 299 
 300   // if icache check fails, then jump to runtime routine
 301   // Note: RECEIVER must still contain the receiver!
 302   Label dont;
 303   __ br(Assembler::EQ, dont);
 304   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 305 
 306   // We align the verified entry point unless the method body
 307   // (including its inline cache check) will fit in a single 64-byte
 308   // icache line.
 309   if (! method()->is_accessor() || __ offset() - start_offset > 4 * 4) {
 310     // force alignment after the cache check.
 311     __ align(CodeEntryAlignment);
 312   }
 313 
 314   __ bind(dont);
 315   return start_offset;
 316 }
 317 
 318 
 319 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
 320   if (o == NULL) {
 321     __ mov(reg, zr);
 322   } else {
 323     __ movoop(reg, o, /*immediate*/true);
 324   }
 325 }
 326 
 327 void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
 328   address target = NULL;
 329   relocInfo::relocType reloc_type = relocInfo::none;
 330 
 331   switch (patching_id(info)) {
 332   case PatchingStub::access_field_id:
 333     target = Runtime1::entry_for(Runtime1::access_field_patching_id);
 334     reloc_type = relocInfo::section_word_type;
 335     break;
 336   case PatchingStub::load_klass_id:
 337     target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
 338     reloc_type = relocInfo::metadata_type;
 339     break;
 340   case PatchingStub::load_mirror_id:
 341     target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
 342     reloc_type = relocInfo::oop_type;
 343     break;
 344   case PatchingStub::load_appendix_id:
 345     target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
 346     reloc_type = relocInfo::oop_type;
 347     break;
 348   default: ShouldNotReachHere();
 349   }
 350 
 351   __ far_call(RuntimeAddress(target));
 352   add_call_info_here(info);
 353 }
 354 
 355 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
 356   deoptimize_trap(info);
 357 }
 358 
 359 
 360 // This specifies the sp decrement needed to build the frame
 361 int LIR_Assembler::initial_frame_size_in_bytes() const {
 362   // if rounding, must let FrameMap know!
 363 
 364   // The frame_map records size in slots (32-bit words)
 365 
 366   // subtract two words to account for return address and link
 367   return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word))  * VMRegImpl::stack_slot_size;
 368 }
 369 
 370 
 371 int LIR_Assembler::emit_exception_handler() {
 372   // if the last instruction is a call (typically to do a throw which
 373   // is coming at the end after block reordering) the return address
 374   // must still point into the code area in order to avoid assertion
 375   // failures when searching for the corresponding bci => add a nop
 376   // (was bug 5/14/1999 - gri)
 377   __ nop();
 378 
 379   // generate code for exception handler
 380   address handler_base = __ start_a_stub(exception_handler_size());
 381   if (handler_base == NULL) {
 382     // not enough space left for the handler
 383     bailout("exception handler overflow");
 384     return -1;
 385   }
 386 
 387   int offset = code_offset();
 388 
 389   // the exception oop is in r0 and the throwing pc is in r3
 390   // no other registers need to be preserved, so invalidate them
 391   __ invalidate_registers(false, true, true, false, true, true);
 392 
 393   // check that there is really an exception
 394   __ verify_not_null_oop(r0);
 395 
 396   // search an exception handler (r0: exception oop, r3: throwing pc)
 397   __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));  __ should_not_reach_here();
 398   guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
 399   __ end_a_stub();
 400 
 401   return offset;
 402 }
 403 
 404 
 405 // Emit the code to remove the frame from the stack in the exception
 406 // unwind path.
 407 int LIR_Assembler::emit_unwind_handler() {
 408 #ifndef PRODUCT
 409   if (CommentedAssembly) {
 410     _masm->block_comment("Unwind handler");
 411   }
 412 #endif
 413 
 414   int offset = code_offset();
 415 
 416   // Fetch the exception from TLS and clear out exception related thread state
 417   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
 418   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
 419   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
 420 
 421   __ bind(_unwind_handler_entry);
 422   __ verify_not_null_oop(r0);
 423   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 424     __ mov(r19, r0);  // Preserve the exception
 425   }
 426 
 427   // Perform needed unlocking
 428   MonitorExitStub* stub = NULL;
 429   if (method()->is_synchronized()) {
 430     monitor_address(0, FrameMap::r0_opr);
 431     stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
 432     __ unlock_object(r5, r4, r0, *stub->entry());
 433     __ bind(*stub->continuation());
 434   }
 435 
 436   if (compilation()->env()->dtrace_method_probes()) {
 437     __ call_Unimplemented();
 438 #if 0
 439     __ movptr(Address(rsp, 0), rax);
 440     __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
 441     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
 442 #endif
 443   }
 444 
 445   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 446     __ mov(r0, r19);  // Restore the exception
 447   }
 448 
 449   // remove the activation and dispatch to the unwind handler
 450   __ block_comment("remove_frame and dispatch to the unwind handler");
 451   __ remove_frame(initial_frame_size_in_bytes());
 452   __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 453 
 454   // Emit the slow path assembly
 455   if (stub != NULL) {
 456     stub->emit_code(this);
 457   }
 458 
 459   return offset;
 460 }
 461 
 462 
 463 int LIR_Assembler::emit_deopt_handler() {
 464   // if the last instruction is a call (typically to do a throw which
 465   // is coming at the end after block reordering) the return address
 466   // must still point into the code area in order to avoid assertion
 467   // failures when searching for the corresponding bci => add a nop
 468   // (was bug 5/14/1999 - gri)
 469   __ nop();
 470 
 471   // generate code for deopt handler
 472   address handler_base = __ start_a_stub(deopt_handler_size());
 473   if (handler_base == NULL) {
 474     // not enough space left for the handler
 475     bailout("deopt handler overflow");
 476     return -1;
 477   }
 478 
 479   int offset = code_offset();
 480 
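       // Point lr at this deopt handler so the recorded return address still
       // lies within this nmethod's code when we enter the deopt blob.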
 481   __ adr(lr, pc());
 482   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 483   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 484   __ end_a_stub();
 485 
 486   return offset;
 487 }
 488 
 489 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
 490   _masm->code_section()->relocate(adr, relocInfo::poll_type);
 491   int pc_offset = code_offset();
 492   flush_debug_info(pc_offset);
 493   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
 494   if (info->exception_handlers() != NULL) {
 495     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
 496   }
 497 }
 498 
 499 void LIR_Assembler::return_op(LIR_Opr result) {
 500   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0");
 501 
 502   // Pop the stack before the safepoint code
 503   __ remove_frame(initial_frame_size_in_bytes());
 504 
 505   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 506     __ reserved_stack_check();
 507   }
 508 
 509   address polling_page(os::get_polling_page());
 510   __ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type);
 511   __ ret(lr);
 512 }
 513 
 514 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 515   address polling_page(os::get_polling_page());
 516   guarantee(info != NULL, "Shouldn't be NULL");
 517   assert(os::is_poll_address(polling_page), "should be");
 518   __ get_polling_page(rscratch1, polling_page, relocInfo::poll_type);
 519   add_debug_info_for_branch(info);  // This isn't just debug info:
 520                                     // it's the oop map
 521   __ read_polling_page(rscratch1, relocInfo::poll_type);
 522   return __ offset();
 523 }
 524 
 525 
 526 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
 527   if (from_reg == r31_sp)
 528     from_reg = sp;
 529   if (to_reg == r31_sp)
 530     to_reg = sp;
 531   __ mov(to_reg, from_reg);
 532 }
 533 
 534 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
 535 
 536 
 537 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
 538   assert(src->is_constant(), "should not call otherwise");
 539   assert(dest->is_register(), "should not call otherwise");
 540   LIR_Const* c = src->as_constant_ptr();
 541 
 542   switch (c->type()) {
 543     case T_INT: {
 544       assert(patch_code == lir_patch_none, "no patching handled here");
 545       __ movw(dest->as_register(), c->as_jint());
 546       break;
 547     }
 548 
 549     case T_ADDRESS: {
 550       assert(patch_code == lir_patch_none, "no patching handled here");
 551       __ mov(dest->as_register(), c->as_jint());
 552       break;
 553     }
 554 
 555     case T_LONG: {
 556       assert(patch_code == lir_patch_none, "no patching handled here");
 557       __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
 558       break;
 559     }
 560 
 561     case T_OBJECT: {
 562         if (patch_code == lir_patch_none) {
 563           jobject2reg(c->as_jobject(), dest->as_register());
 564         } else {
 565           jobject2reg_with_patching(dest->as_register(), info);
 566         }
 567       break;
 568     }
 569 
 570     case T_METADATA: {
 571       if (patch_code != lir_patch_none) {
 572         klass2reg_with_patching(dest->as_register(), info);
 573       } else {
 574         __ mov_metadata(dest->as_register(), c->as_metadata());
 575       }
 576       break;
 577     }
 578 
 579     case T_FLOAT: {
 580       if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
 581         __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
 582       } else {
 583         __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
 584         __ ldrs(dest->as_float_reg(), Address(rscratch1));
 585       }
 586       break;
 587     }
 588 
 589     case T_DOUBLE: {
 590       if (__ operand_valid_for_float_immediate(c->as_jdouble())) {
 591         __ fmovd(dest->as_double_reg(), (c->as_jdouble()));
 592       } else {
 593         __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
 594         __ ldrd(dest->as_double_reg(), Address(rscratch1));
 595       }
 596       break;
 597     }
 598 
 599     default:
 600       ShouldNotReachHere();
 601   }
 602 }
 603 
 604 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 605   LIR_Const* c = src->as_constant_ptr();
 606   switch (c->type()) {
 607   case T_OBJECT:
 608     {
 609       if (! c->as_jobject())
 610         __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
 611       else {
 612         const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
 613         reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
 614       }
 615     }
 616     break;
 617   case T_ADDRESS:
 618     {
 619       const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
 620       reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
 621     }
         break;
 622   case T_INT:
 623   case T_FLOAT:
 624     {
 625       Register reg = zr;
 626       if (c->as_jint_bits() == 0)
 627         __ strw(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
 628       else {
 629         __ movw(rscratch1, c->as_jint_bits());
 630         __ strw(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix()));
 631       }
 632     }
 633     break;
 634   case T_LONG:
 635   case T_DOUBLE:
 636     {
 637       Register reg = zr;
 638       if (c->as_jlong_bits() == 0)
 639         __ str(zr, frame_map()->address_for_slot(dest->double_stack_ix(),
 640                                                  lo_word_offset_in_bytes));
 641       else {
 642         __ mov(rscratch1, (intptr_t)c->as_jlong_bits());
 643         __ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(),
 644                                                         lo_word_offset_in_bytes));
 645       }
 646     }
 647     break;
 648   default:
 649     ShouldNotReachHere();
 650   }
 651 }
 652 
 653 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 654   assert(src->is_constant(), "should not call otherwise");
 655   LIR_Const* c = src->as_constant_ptr();
 656   LIR_Address* to_addr = dest->as_address_ptr();
 657 
 658   void (Assembler::* insn)(Register Rt, const Address &adr);
 659 
 660   switch (type) {
 661   case T_ADDRESS:
 662     assert(c->as_jint() == 0, "should be");
 663     insn = &Assembler::str;
 664     break;
 665   case T_LONG:
 666     assert(c->as_jlong() == 0, "should be");
 667     insn = &Assembler::str;
 668     break;
 669   case T_INT:
 670     assert(c->as_jint() == 0, "should be");
 671     insn = &Assembler::strw;
 672     break;
 673   case T_OBJECT:
 674   case T_ARRAY:
 675     assert(c->as_jobject() == 0, "should be");
 676     if (UseCompressedOops && !wide) {
 677       insn = &Assembler::strw;
 678     } else {
 679       insn = &Assembler::str;
 680     }
 681     break;
 682   case T_CHAR:
 683   case T_SHORT:
 684     assert(c->as_jint() == 0, "should be");
 685     insn = &Assembler::strh;
 686     break;
 687   case T_BOOLEAN:
 688   case T_BYTE:
 689     assert(c->as_jint() == 0, "should be");
 690     insn = &Assembler::strb;
 691     break;
 692   default:
 693     ShouldNotReachHere();
 694     insn = &Assembler::str;  // unreachable
 695   }
 696 
 697   if (info) add_debug_info_for_null_check_here(info);
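       // All constants handled above are zero (see the asserts), so the store
       // always writes the zero register.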
 698   (_masm->*insn)(zr, as_Address(to_addr, rscratch1));
 699 }
 700 
 701 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 702   assert(src->is_register(), "should not call otherwise");
 703   assert(dest->is_register(), "should not call otherwise");
 704 
 705   // move between cpu-registers
 706   if (dest->is_single_cpu()) {
 707     if (src->type() == T_LONG) {
 708       // Can do LONG -> OBJECT
 709       move_regs(src->as_register_lo(), dest->as_register());
 710       return;
 711     }
 712     assert(src->is_single_cpu(), "must match");
 713     if (src->type() == T_OBJECT) {
 714       __ verify_oop(src->as_register());
 715     }
 716     move_regs(src->as_register(), dest->as_register());
 717 
 718   } else if (dest->is_double_cpu()) {
 719     if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
 720       // Surprisingly, we can see a move of a long to T_OBJECT
 721       __ verify_oop(src->as_register());
 722       move_regs(src->as_register(), dest->as_register_lo());
 723       return;
 724     }
 725     assert(src->is_double_cpu(), "must match");
 726     Register f_lo = src->as_register_lo();
 727     Register f_hi = src->as_register_hi();
 728     Register t_lo = dest->as_register_lo();
 729     Register t_hi = dest->as_register_hi();
 730     assert(f_hi == f_lo, "must be same");
 731     assert(t_hi == t_lo, "must be same");
 732     move_regs(f_lo, t_lo);
 733 
 734   } else if (dest->is_single_fpu()) {
 735     __ fmovs(dest->as_float_reg(), src->as_float_reg());
 736 
 737   } else if (dest->is_double_fpu()) {
 738     __ fmovd(dest->as_double_reg(), src->as_double_reg());
 739 
 740   } else {
 741     ShouldNotReachHere();
 742   }
 743 }
 744 
 745 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
 746   if (src->is_single_cpu()) {
 747     if (type == T_ARRAY || type == T_OBJECT) {
 748       __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 749       __ verify_oop(src->as_register());
 750     } else if (type == T_METADATA || type == T_DOUBLE) {
 751       __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 752     } else {
 753       __ strw(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 754     }
 755 
 756   } else if (src->is_double_cpu()) {
 757     Address dest_addr_LO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
 758     __ str(src->as_register_lo(), dest_addr_LO);
 759 
 760   } else if (src->is_single_fpu()) {
 761     Address dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
 762     __ strs(src->as_float_reg(), dest_addr);
 763 
 764   } else if (src->is_double_fpu()) {
 765     Address dest_addr = frame_map()->address_for_slot(dest->double_stack_ix());
 766     __ strd(src->as_double_reg(), dest_addr);
 767 
 768   } else {
 769     ShouldNotReachHere();
 770   }
 771 
 772 }
 773 
 774 
 775 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
 776   LIR_Address* to_addr = dest->as_address_ptr();
 777   PatchingStub* patch = NULL;
 778   Register compressed_src = rscratch1;
 779 
 780   if (patch_code != lir_patch_none) {
 781     deoptimize_trap(info);
 782     return;
 783   }
 784 
 785   if (type == T_ARRAY || type == T_OBJECT) {
 786     __ verify_oop(src->as_register());
 787 
 788     if (UseCompressedOops && !wide) {
 789       __ encode_heap_oop(compressed_src, src->as_register());
 790     } else {
 791       compressed_src = src->as_register();
 792     }
 793   }
 794 
 795   int null_check_here = code_offset();
 796   switch (type) {
 797     case T_FLOAT: {
 798       __ strs(src->as_float_reg(), as_Address(to_addr));
 799       break;
 800     }
 801 
 802     case T_DOUBLE: {
 803       __ strd(src->as_double_reg(), as_Address(to_addr));
 804       break;
 805     }
 806 
 807     case T_ARRAY:   // fall through
 808     case T_OBJECT:  // fall through
 809       if (UseCompressedOops && !wide) {
 810         __ strw(compressed_src, as_Address(to_addr, rscratch2));
 811       } else {
 812          __ str(compressed_src, as_Address(to_addr));
 813       }
 814       break;
 815     case T_METADATA:
 816       // We get here to store a method pointer to the stack to pass to
 817       // a dtrace runtime call. This can't work on 64 bit with
 818       // compressed klass ptrs: T_METADATA can be a compressed klass
 819       // ptr or a 64 bit method pointer.
 820       ShouldNotReachHere();
 821       __ str(src->as_register(), as_Address(to_addr));
 822       break;
 823     case T_ADDRESS:
 824       __ str(src->as_register(), as_Address(to_addr));
 825       break;
 826     case T_INT:
 827       __ strw(src->as_register(), as_Address(to_addr));
 828       break;
 829 
 830     case T_LONG: {
 831       __ str(src->as_register_lo(), as_Address_lo(to_addr));
 832       break;
 833     }
 834 
 835     case T_BYTE:    // fall through
 836     case T_BOOLEAN: {
 837       __ strb(src->as_register(), as_Address(to_addr));
 838       break;
 839     }
 840 
 841     case T_CHAR:    // fall through
 842     case T_SHORT:
 843       __ strh(src->as_register(), as_Address(to_addr));
 844       break;
 845 
 846     default:
 847       ShouldNotReachHere();
 848   }
 849   if (info != NULL) {
 850     add_debug_info_for_null_check(null_check_here, info);
 851   }
 852 }
 853 
 854 
 855 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
 856   assert(src->is_stack(), "should not call otherwise");
 857   assert(dest->is_register(), "should not call otherwise");
 858 
 859   if (dest->is_single_cpu()) {
 860     if (type == T_ARRAY || type == T_OBJECT) {
 861       __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 862       __ verify_oop(dest->as_register());
 863     } else if (type == T_METADATA) {
 864       __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 865     } else {
 866       __ ldrw(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 867     }
 868 
 869   } else if (dest->is_double_cpu()) {
 870     Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
 871     __ ldr(dest->as_register_lo(), src_addr_LO);
 872 
 873   } else if (dest->is_single_fpu()) {
 874     Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
 875     __ ldrs(dest->as_float_reg(), src_addr);
 876 
 877   } else if (dest->is_double_fpu()) {
 878     Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
 879     __ ldrd(dest->as_double_reg(), src_addr);
 880 
 881   } else {
 882     ShouldNotReachHere();
 883   }
 884 }
 885 
 886 
 887 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
 888   address target = NULL;
 889   relocInfo::relocType reloc_type = relocInfo::none;
 890 
 891   switch (patching_id(info)) {
 892   case PatchingStub::access_field_id:
 893     target = Runtime1::entry_for(Runtime1::access_field_patching_id);
 894     reloc_type = relocInfo::section_word_type;
 895     break;
 896   case PatchingStub::load_klass_id:
 897     target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
 898     reloc_type = relocInfo::metadata_type;
 899     break;
 900   case PatchingStub::load_mirror_id:
 901     target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
 902     reloc_type = relocInfo::oop_type;
 903     break;
 904   case PatchingStub::load_appendix_id:
 905     target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
 906     reloc_type = relocInfo::oop_type;
 907     break;
 908   default: ShouldNotReachHere();
 909   }
 910 
 911   __ far_call(RuntimeAddress(target));
 912   add_call_info_here(info);
 913 }
 914 
 915 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
 916 
 917   LIR_Opr temp;
 918   if (type == T_LONG || type == T_DOUBLE)
 919     temp = FrameMap::rscratch1_long_opr;
 920   else
 921     temp = FrameMap::rscratch1_opr;
 922 
 923   stack2reg(src, temp, src->type());
 924   reg2stack(temp, dest, dest->type(), false);
 925 }
 926 
 927 
 928 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
 929   LIR_Address* addr = src->as_address_ptr();
 930   LIR_Address* from_addr = src->as_address_ptr();
 931 
 932   if (addr->base()->type() == T_OBJECT) {
 933     __ verify_oop(addr->base()->as_pointer_register());
 934   }
 935 
 936   if (patch_code != lir_patch_none) {
 937     deoptimize_trap(info);
 938     return;
 939   }
 940 
 941   if (info != NULL) {
 942     add_debug_info_for_null_check_here(info);
 943   }
 944   int null_check_here = code_offset();
 945   switch (type) {
 946     case T_FLOAT: {
 947       __ ldrs(dest->as_float_reg(), as_Address(from_addr));
 948       break;
 949     }
 950 
 951     case T_DOUBLE: {
 952       __ ldrd(dest->as_double_reg(), as_Address(from_addr));
 953       break;
 954     }
 955 
 956     case T_ARRAY:   // fall through
 957     case T_OBJECT:  // fall through
 958       if (UseCompressedOops && !wide) {
 959         __ ldrw(dest->as_register(), as_Address(from_addr));
 960       } else {
 961          __ ldr(dest->as_register(), as_Address(from_addr));
 962       }
 963       break;
 964     case T_METADATA:
 965       // We get here to store a method pointer to the stack to pass to
 966       // a dtrace runtime call. This can't work on 64 bit with
 967       // compressed klass ptrs: T_METADATA can be a compressed klass
 968       // ptr or a 64 bit method pointer.
 969       ShouldNotReachHere();
 970       __ ldr(dest->as_register(), as_Address(from_addr));
 971       break;
 972     case T_ADDRESS:
 973       // FIXME: OMG this is a horrible kludge.  Any offset from an
 974       // address that matches klass_offset_in_bytes() will be loaded
 975       // as a word, not a long.
 976       if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
 977         __ ldrw(dest->as_register(), as_Address(from_addr));
 978       } else {
 979         __ ldr(dest->as_register(), as_Address(from_addr));
 980       }
 981       break;
 982     case T_INT:
 983       __ ldrw(dest->as_register(), as_Address(from_addr));
 984       break;
 985 
 986     case T_LONG: {
 987       __ ldr(dest->as_register_lo(), as_Address_lo(from_addr));
 988       break;
 989     }
 990 
 991     case T_BYTE:
 992       __ ldrsb(dest->as_register(), as_Address(from_addr));
 993       break;
 994     case T_BOOLEAN: {
 995       __ ldrb(dest->as_register(), as_Address(from_addr));
 996       break;
 997     }
 998 
 999     case T_CHAR:
1000       __ ldrh(dest->as_register(), as_Address(from_addr));
1001       break;
1002     case T_SHORT:
1003       __ ldrsh(dest->as_register(), as_Address(from_addr));
1004       break;
1005 
1006     default:
1007       ShouldNotReachHere();
1008   }
1009 
1010   if (type == T_ARRAY || type == T_OBJECT) {
1011     if (UseCompressedOops && !wide) {
1012       __ decode_heap_oop(dest->as_register());
1013     }
1014     __ verify_oop(dest->as_register());
1015   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1016     if (UseCompressedClassPointers) {
1017       __ decode_klass_not_null(dest->as_register());
1018     }
1019   }
1020 }
1021 
1022 
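     // Returns log2 of the element size, which callers use as an address scale
     // or shift amount when indexing array elements.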
1023 int LIR_Assembler::array_element_size(BasicType type) const {
1024   int elem_size = type2aelembytes(type);
1025   return exact_log2(elem_size);
1026 }
1027 
1028 void LIR_Assembler::arithmetic_idiv(LIR_Op3* op, bool is_irem) {
1029   Register Rdividend = op->in_opr1()->as_register();
1030   Register Rdivisor  = op->in_opr2()->as_register();
1031   Register Rscratch  = op->in_opr3()->as_register();
1032   Register Rresult   = op->result_opr()->as_register();
1033   int divisor = -1;
1034 
1035   /*
1036   TODO: For some reason, using the Rscratch that gets passed in is
1037   not possible because the register allocator does not see the tmp reg
1038   as used, and assigns it the same register as Rdividend. We use rscratch1
1039   instead.
1040 
1041   assert(Rdividend != Rscratch, "");
1042   assert(Rdivisor  != Rscratch, "");
1043   */
1044 
1045   if (Rdivisor == noreg && is_power_of_2(divisor)) {
1046     // convert division by a power of two into some shifts and logical operations
1047   }
1048 
1049   __ corrected_idivl(Rresult, Rdividend, Rdivisor, is_irem, rscratch1);
1050 }
1051 
1052 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1053   switch (op->code()) {
1054   case lir_idiv:
1055     arithmetic_idiv(op, false);
1056     break;
1057   case lir_irem:
1058     arithmetic_idiv(op, true);
1059     break;
1060   case lir_fmad:
1061     __ fmaddd(op->result_opr()->as_double_reg(),
1062               op->in_opr1()->as_double_reg(),
1063               op->in_opr2()->as_double_reg(),
1064               op->in_opr3()->as_double_reg());
1065     break;
1066   case lir_fmaf:
1067     __ fmadds(op->result_opr()->as_float_reg(),
1068               op->in_opr1()->as_float_reg(),
1069               op->in_opr2()->as_float_reg(),
1070               op->in_opr3()->as_float_reg());
1071     break;
1072   default:      ShouldNotReachHere(); break;
1073   }
1074 }
1075 
1076 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
1077 #ifdef ASSERT
1078   assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
1079   if (op->block() != NULL)  _branch_target_blocks.append(op->block());
1080   if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
1081 #endif
1082 
1083   if (op->cond() == lir_cond_always) {
1084     if (op->info() != NULL) add_debug_info_for_branch(op->info());
1085     __ b(*(op->label()));
1086   } else {
1087     Assembler::Condition acond;
1088     if (op->code() == lir_cond_float_branch) {
1089       bool is_unordered = (op->ublock() == op->block());
1090       // Assembler::EQ does not permit unordered branches, so we add
1091       // another branch here.  Likewise, Assembler::NE does not permit
1092       // ordered branches.
1093       if ((is_unordered && op->cond() == lir_cond_equal)
1094           || (!is_unordered && op->cond() == lir_cond_notEqual))
1095         __ br(Assembler::VS, *(op->ublock()->label()));
1096       switch(op->cond()) {
1097       case lir_cond_equal:        acond = Assembler::EQ; break;
1098       case lir_cond_notEqual:     acond = Assembler::NE; break;
1099       case lir_cond_less:         acond = (is_unordered ? Assembler::LT : Assembler::LO); break;
1100       case lir_cond_lessEqual:    acond = (is_unordered ? Assembler::LE : Assembler::LS); break;
1101       case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break;
1102       case lir_cond_greater:      acond = (is_unordered ? Assembler::HI : Assembler::GT); break;
1103       default:                    ShouldNotReachHere();
1104         acond = Assembler::EQ;  // unreachable
1105       }
1106     } else {
1107       switch (op->cond()) {
1108         case lir_cond_equal:        acond = Assembler::EQ; break;
1109         case lir_cond_notEqual:     acond = Assembler::NE; break;
1110         case lir_cond_less:         acond = Assembler::LT; break;
1111         case lir_cond_lessEqual:    acond = Assembler::LE; break;
1112         case lir_cond_greaterEqual: acond = Assembler::GE; break;
1113         case lir_cond_greater:      acond = Assembler::GT; break;
1114         case lir_cond_belowEqual:   acond = Assembler::LS; break;
1115         case lir_cond_aboveEqual:   acond = Assembler::HS; break;
1116         default:                    ShouldNotReachHere();
1117           acond = Assembler::EQ;  // unreachable
1118       }
1119     }
1120     __ br(acond, *(op->label()));
1121   }
1122 }
1123 
1124 
1125 
1126 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
1127   LIR_Opr src  = op->in_opr();
1128   LIR_Opr dest = op->result_opr();
1129 
1130   switch (op->bytecode()) {
1131     case Bytecodes::_i2f:
1132       {
1133         __ scvtfws(dest->as_float_reg(), src->as_register());
1134         break;
1135       }
1136     case Bytecodes::_i2d:
1137       {
1138         __ scvtfwd(dest->as_double_reg(), src->as_register());
1139         break;
1140       }
1141     case Bytecodes::_l2d:
1142       {
1143         __ scvtfd(dest->as_double_reg(), src->as_register_lo());
1144         break;
1145       }
1146     case Bytecodes::_l2f:
1147       {
1148         __ scvtfs(dest->as_float_reg(), src->as_register_lo());
1149         break;
1150       }
1151     case Bytecodes::_f2d:
1152       {
1153         __ fcvts(dest->as_double_reg(), src->as_float_reg());
1154         break;
1155       }
1156     case Bytecodes::_d2f:
1157       {
1158         __ fcvtd(dest->as_float_reg(), src->as_double_reg());
1159         break;
1160       }
1161     case Bytecodes::_i2c:
1162       {
1163         __ ubfx(dest->as_register(), src->as_register(), 0, 16);
1164         break;
1165       }
1166     case Bytecodes::_i2l:
1167       {
1168         __ sxtw(dest->as_register_lo(), src->as_register());
1169         break;
1170       }
1171     case Bytecodes::_i2s:
1172       {
1173         __ sxth(dest->as_register(), src->as_register());
1174         break;
1175       }
1176     case Bytecodes::_i2b:
1177       {
1178         __ sxtb(dest->as_register(), src->as_register());
1179         break;
1180       }
1181     case Bytecodes::_l2i:
1182       {
1183         _masm->block_comment("FIXME: This could be a no-op");
1184         __ uxtw(dest->as_register(), src->as_register_lo());
1185         break;
1186       }
1187     case Bytecodes::_d2l:
1188       {
1189         __ fcvtzd(dest->as_register_lo(), src->as_double_reg());
1190         break;
1191       }
1192     case Bytecodes::_f2i:
1193       {
1194         __ fcvtzsw(dest->as_register(), src->as_float_reg());
1195         break;
1196       }
1197     case Bytecodes::_f2l:
1198       {
1199         __ fcvtzs(dest->as_register_lo(), src->as_float_reg());
1200         break;
1201       }
1202     case Bytecodes::_d2i:
1203       {
1204         __ fcvtzdw(dest->as_register(), src->as_double_reg());
1205         break;
1206       }
1207     default: ShouldNotReachHere();
1208   }
1209 }
1210 
1211 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
1212   if (op->init_check()) {
1213     __ ldrb(rscratch1, Address(op->klass()->as_register(),
1214                                InstanceKlass::init_state_offset()));
1215     __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1216     add_debug_info_for_null_check_here(op->stub()->info());
1217     __ br(Assembler::NE, *op->stub()->entry());
1218   }
1219   __ allocate_object(op->obj()->as_register(),
1220                      op->tmp1()->as_register(),
1221                      op->tmp2()->as_register(),
1222                      op->header_size(),
1223                      op->object_size(),
1224                      op->klass()->as_register(),
1225                      *op->stub()->entry());
1226   __ bind(*op->stub()->continuation());
1227 }
1228 
1229 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1230   Register len =  op->len()->as_register();
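       // The incoming length is a 32-bit int; zero-extend it to 64 bits below.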
1231   __ uxtw(len, len);
1232 
1233   if (UseSlowPath ||
1234       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
1235       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
1236     __ b(*op->stub()->entry());
1237   } else {
1238     Register tmp1 = op->tmp1()->as_register();
1239     Register tmp2 = op->tmp2()->as_register();
1240     Register tmp3 = op->tmp3()->as_register();
1241     if (len == tmp1) {
1242       tmp1 = tmp3;
1243     } else if (len == tmp2) {
1244       tmp2 = tmp3;
1245     } else if (len == tmp3) {
1246       // everything is ok
1247     } else {
1248       __ mov(tmp3, len);
1249     }
1250     __ allocate_array(op->obj()->as_register(),
1251                       len,
1252                       tmp1,
1253                       tmp2,
1254                       arrayOopDesc::header_size(op->type()),
1255                       array_element_size(op->type()),
1256                       op->klass()->as_register(),
1257                       *op->stub()->entry());
1258   }
1259   __ bind(*op->stub()->continuation());
1260 }
1261 
1262 void LIR_Assembler::type_profile_helper(Register mdo,
1263                                         ciMethodData *md, ciProfileData *data,
1264                                         Register recv, Label* update_done) {
1265   for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
1266     Label next_test;
1267     // See if the receiver is receiver[n].
1268     __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
1269     __ ldr(rscratch1, Address(rscratch2));
1270     __ cmp(recv, rscratch1);
1271     __ br(Assembler::NE, next_test);
1272     Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
1273     __ addptr(data_addr, DataLayout::counter_increment);
1274     __ b(*update_done);
1275     __ bind(next_test);
1276   }
1277 
1278   // Didn't find receiver; find next empty slot and fill it in
1279   for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
1280     Label next_test;
1281     __ lea(rscratch2,
1282            Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
1283     Address recv_addr(rscratch2);
1284     __ ldr(rscratch1, recv_addr);
1285     __ cbnz(rscratch1, next_test);
1286     __ str(recv, recv_addr);
1287     __ mov(rscratch1, DataLayout::counter_increment);
1288     __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
1289     __ str(rscratch1, Address(rscratch2));
1290     __ b(*update_done);
1291     __ bind(next_test);
1292   }
1293 }
1294 
1295 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
1296   // we always need a stub for the failure case.
1297   CodeStub* stub = op->stub();
1298   Register obj = op->object()->as_register();
1299   Register k_RInfo = op->tmp1()->as_register();
1300   Register klass_RInfo = op->tmp2()->as_register();
1301   Register dst = op->result_opr()->as_register();
1302   ciKlass* k = op->klass();
1303   Register Rtmp1 = noreg;
1304 
1305   // check if it needs to be profiled
1306   ciMethodData* md;
1307   ciProfileData* data;
1308 
1309   const bool should_profile = op->should_profile();
1310 
1311   if (should_profile) {
1312     ciMethod* method = op->profiled_method();
1313     assert(method != NULL, "Should have method");
1314     int bci = op->profiled_bci();
1315     md = method->method_data_or_null();
1316     assert(md != NULL, "Sanity");
1317     data = md->bci_to_data(bci);
1318     assert(data != NULL,                "need data for type check");
1319     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1320   }
1321   Label profile_cast_success, profile_cast_failure;
1322   Label *success_target = should_profile ? &profile_cast_success : success;
1323   Label *failure_target = should_profile ? &profile_cast_failure : failure;
1324 
1325   if (obj == k_RInfo) {
1326     k_RInfo = dst;
1327   } else if (obj == klass_RInfo) {
1328     klass_RInfo = dst;
1329   }
1330   if (k->is_loaded() && !UseCompressedClassPointers) {
1331     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1332   } else {
1333     Rtmp1 = op->tmp3()->as_register();
1334     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1335   }
1336 
1337   assert_different_registers(obj, k_RInfo, klass_RInfo);
1338 
1339   if (should_profile) {
1340     Label not_null;
1341     __ cbnz(obj, not_null);
1342     // Object is null; update MDO and exit
1343     Register mdo  = klass_RInfo;
1344     __ mov_metadata(mdo, md->constant_encoding());
1345     Address data_addr
1346       = __ form_address(rscratch2, mdo,
1347                         md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1348                         0);
1349     __ ldrb(rscratch1, data_addr);
1350     __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1351     __ strb(rscratch1, data_addr);
1352     __ b(*obj_is_null);
1353     __ bind(not_null);
1354   } else {
1355     __ cbz(obj, *obj_is_null);
1356   }
1357 
1358   if (!k->is_loaded()) {
1359     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1360   } else {
1361     __ mov_metadata(k_RInfo, k->constant_encoding());
1362   }
1363   __ verify_oop(obj);
1364 
1365   if (op->fast_check()) {
1366     // get object class
1367     // not a safepoint as obj null check happens earlier
1368     __ load_klass(rscratch1, obj);
1369     __ cmp( rscratch1, k_RInfo);
1370 
1371     __ br(Assembler::NE, *failure_target);
1372     // successful cast, fall through to profile or jump
1373   } else {
1374     // get object class
1375     // not a safepoint as obj null check happens earlier
1376     __ load_klass(klass_RInfo, obj);
1377     if (k->is_loaded()) {
1378       // See if we get an immediate positive hit
1379       __ ldr(rscratch1, Address(klass_RInfo, long(k->super_check_offset())));
1380       __ cmp(k_RInfo, rscratch1);
1381       if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1382         __ br(Assembler::NE, *failure_target);
1383         // successful cast, fall through to profile or jump
1384       } else {
1385         // See if we get an immediate positive hit
1386         __ br(Assembler::EQ, *success_target);
1387         // check for self
1388         __ cmp(klass_RInfo, k_RInfo);
1389         __ br(Assembler::EQ, *success_target);
1390 
1391         __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1392         __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1393         __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1394         // result is a boolean
1395         __ cbzw(klass_RInfo, *failure_target);
1396         // successful cast, fall through to profile or jump
1397       }
1398     } else {
1399       // perform the fast part of the checking logic
1400       __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1401       // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1402       __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1403       __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1404       __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1405       // result is a boolean
1406       __ cbz(k_RInfo, *failure_target);
1407       // successful cast, fall through to profile or jump
1408     }
1409   }
1410   if (should_profile) {
1411     Register mdo  = klass_RInfo, recv = k_RInfo;
1412     __ bind(profile_cast_success);
1413     __ mov_metadata(mdo, md->constant_encoding());
1414     __ load_klass(recv, obj);
1415     Label update_done;
1416     type_profile_helper(mdo, md, data, recv, success);
1417     __ b(*success);
1418 
1419     __ bind(profile_cast_failure);
1420     __ mov_metadata(mdo, md->constant_encoding());
1421     Address counter_addr
1422       = __ form_address(rscratch2, mdo,
1423                         md->byte_offset_of_slot(data, CounterData::count_offset()),
1424                         0);
1425     __ ldr(rscratch1, counter_addr);
1426     __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
1427     __ str(rscratch1, counter_addr);
1428     __ b(*failure);
1429   }
1430   __ b(*success);
1431 }
1432 
1433 
1434 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
1435   const bool should_profile = op->should_profile();
1436 
1437   LIR_Code code = op->code();
1438   if (code == lir_store_check) {
1439     Register value = op->object()->as_register();
1440     Register array = op->array()->as_register();
1441     Register k_RInfo = op->tmp1()->as_register();
1442     Register klass_RInfo = op->tmp2()->as_register();
1443     Register Rtmp1 = op->tmp3()->as_register();
1444 
1445     CodeStub* stub = op->stub();
1446 
1447     // check if it needs to be profiled
1448     ciMethodData* md;
1449     ciProfileData* data;
1450 
1451     if (should_profile) {
1452       ciMethod* method = op->profiled_method();
1453       assert(method != NULL, "Should have method");
1454       int bci = op->profiled_bci();
1455       md = method->method_data_or_null();
1456       assert(md != NULL, "Sanity");
1457       data = md->bci_to_data(bci);
1458       assert(data != NULL,                "need data for type check");
1459       assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1460     }
1461     Label profile_cast_success, profile_cast_failure, done;
1462     Label *success_target = should_profile ? &profile_cast_success : &done;
1463     Label *failure_target = should_profile ? &profile_cast_failure : stub->entry();
1464 
1465     if (should_profile) {
1466       Label not_null;
1467       __ cbnz(value, not_null);
1468       // Object is null; update MDO and exit
1469       Register mdo  = klass_RInfo;
1470       __ mov_metadata(mdo, md->constant_encoding());
1471       Address data_addr
1472         = __ form_address(rscratch2, mdo,
1473                           md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1474                           0);
1475       __ ldrb(rscratch1, data_addr);
1476       __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1477       __ strb(rscratch1, data_addr);
1478       __ b(done);
1479       __ bind(not_null);
1480     } else {
1481       __ cbz(value, done);
1482     }
1483 
1484     add_debug_info_for_null_check_here(op->info_for_exception());
1485     __ load_klass(k_RInfo, array);
1486     __ load_klass(klass_RInfo, value);
1487 
1488     // get instance klass (it's already uncompressed)
1489     __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
1490     // perform the fast part of the checking logic
1491     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1492     // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1493     __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1494     __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1495     __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1496     // result is a boolean
1497     __ cbzw(k_RInfo, *failure_target);
1498     // fall through to the success case
1499 
1500     if (should_profile) {
1501       Register mdo  = klass_RInfo, recv = k_RInfo;
1502       __ bind(profile_cast_success);
1503       __ mov_metadata(mdo, md->constant_encoding());
1504       __ load_klass(recv, value);
1505       Label update_done;
1506       type_profile_helper(mdo, md, data, recv, &done);
1507       __ b(done);
1508 
1509       __ bind(profile_cast_failure);
1510       __ mov_metadata(mdo, md->constant_encoding());
1511       Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1512       __ lea(rscratch2, counter_addr);
1513       __ ldr(rscratch1, Address(rscratch2));
1514       __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
1515       __ str(rscratch1, Address(rscratch2));
1516       __ b(*stub->entry());
1517     }
1518 
1519     __ bind(done);
1520   } else if (code == lir_checkcast) {
1521     Register obj = op->object()->as_register();
1522     Register dst = op->result_opr()->as_register();
1523     Label success;
1524     emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
1525     __ bind(success);
1526     if (dst != obj) {
1527       __ mov(dst, obj);
1528     }
1529   } else if (code == lir_instanceof) {
1530     Register obj = op->object()->as_register();
1531     Register dst = op->result_opr()->as_register();
1532     Label success, failure, done;
1533     emit_typecheck_helper(op, &success, &failure, &failure);
1534     __ bind(failure);
1535     __ mov(dst, zr);
1536     __ b(done);
1537     __ bind(success);
1538     __ mov(dst, 1);
1539     __ bind(done);
1540   } else {
1541     ShouldNotReachHere();
1542   }
1543 }
1544 
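     // Word/doubleword compare-and-swap helpers: acquire/release CAS followed by
     // a trailing barrier; rscratch1 ends up 0 on success and 1 on failure.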
1545 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1546   __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1547   __ cset(rscratch1, Assembler::NE);
1548   __ membar(__ AnyAny);
1549 }
1550 
1551 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1552   __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1553   __ cset(rscratch1, Assembler::NE);
1554   __ membar(__ AnyAny);
1555 }
1556 
1557 
1558 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1559   assert(VM_Version::supports_cx8(), "wrong machine");
1560   Register addr;
1561   if (op->addr()->is_register()) {
1562     addr = as_reg(op->addr());
1563   } else {
1564     assert(op->addr()->is_address(), "what else?");
1565     LIR_Address* addr_ptr = op->addr()->as_address_ptr();
1566     assert(addr_ptr->disp() == 0, "need 0 disp");
1567     assert(addr_ptr->index() == LIR_OprDesc::illegalOpr(), "need 0 index");
1568     addr = as_reg(addr_ptr->base());
1569   }
1570   Register newval = as_reg(op->new_value());
1571   Register cmpval = as_reg(op->cmp_value());
1572   Label succeed, fail, around;
1573 
1574   if (op->code() == lir_cas_obj) {
1575     if (UseCompressedOops) {
1576       Register t1 = op->tmp1()->as_register();
1577       assert(op->tmp1()->is_valid(), "must be");
1578       __ encode_heap_oop(t1, cmpval);
1579       cmpval = t1;
1580       __ encode_heap_oop(rscratch2, newval);
1581       newval = rscratch2;
1582       casw(addr, newval, cmpval);
1583     } else {
1584       casl(addr, newval, cmpval);
1585     }
1586   } else if (op->code() == lir_cas_int) {
1587     casw(addr, newval, cmpval);
1588   } else {
1589     casl(addr, newval, cmpval);
1590   }
1591 }
1592 
1593 
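     // Conditional move: 0/1 integer constant pairs collapse into a single cset;
     // otherwise both operands are materialized in registers (using the scratch
     // registers for stack slots and constants) and selected with csel.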
1594 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
1595 
1596   Assembler::Condition acond, ncond;
1597   switch (condition) {
1598   case lir_cond_equal:        acond = Assembler::EQ; ncond = Assembler::NE; break;
1599   case lir_cond_notEqual:     acond = Assembler::NE; ncond = Assembler::EQ; break;
1600   case lir_cond_less:         acond = Assembler::LT; ncond = Assembler::GE; break;
1601   case lir_cond_lessEqual:    acond = Assembler::LE; ncond = Assembler::GT; break;
1602   case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;
1603   case lir_cond_greater:      acond = Assembler::GT; ncond = Assembler::LE; break;
1604   case lir_cond_belowEqual:
1605   case lir_cond_aboveEqual:
1606   default:                    ShouldNotReachHere();
1607     acond = Assembler::EQ; ncond = Assembler::NE;  // unreachable
1608   }
1609 
1610   assert(result->is_single_cpu() || result->is_double_cpu(),
1611          "expect single register for result");
1612   if (opr1->is_constant() && opr2->is_constant()
1613       && opr1->type() == T_INT && opr2->type() == T_INT) {
1614     jint val1 = opr1->as_jint();
1615     jint val2 = opr2->as_jint();
1616     if (val1 == 0 && val2 == 1) {
1617       __ cset(result->as_register(), ncond);
1618       return;
1619     } else if (val1 == 1 && val2 == 0) {
1620       __ cset(result->as_register(), acond);
1621       return;
1622     }
1623   }
1624 
1625   if (opr1->is_constant() && opr2->is_constant()
1626       && opr1->type() == T_LONG && opr2->type() == T_LONG) {
1627     jlong val1 = opr1->as_jlong();
1628     jlong val2 = opr2->as_jlong();
1629     if (val1 == 0 && val2 == 1) {
1630       __ cset(result->as_register_lo(), ncond);
1631       return;
1632     } else if (val1 == 1 && val2 == 0) {
1633       __ cset(result->as_register_lo(), acond);
1634       return;
1635     }
1636   }
1637 
1638   if (opr1->is_stack()) {
1639     stack2reg(opr1, FrameMap::rscratch1_opr, result->type());
1640     opr1 = FrameMap::rscratch1_opr;
1641   } else if (opr1->is_constant()) {
1642     LIR_Opr tmp
1643       = opr1->type() == T_LONG ? FrameMap::rscratch1_long_opr : FrameMap::rscratch1_opr;
1644     const2reg(opr1, tmp, lir_patch_none, NULL);
1645     opr1 = tmp;
1646   }
1647 
1648   if (opr2->is_stack()) {
1649     stack2reg(opr2, FrameMap::rscratch2_opr, result->type());
1650     opr2 = FrameMap::rscratch2_opr;
1651   } else if (opr2->is_constant()) {
1652     LIR_Opr tmp
1653       = opr2->type() == T_LONG ? FrameMap::rscratch2_long_opr : FrameMap::rscratch2_opr;
1654     const2reg(opr2, tmp, lir_patch_none, NULL);
1655     opr2 = tmp;
1656   }
1657 
1658   if (result->type() == T_LONG)
1659     __ csel(result->as_register_lo(), opr1->as_register_lo(), opr2->as_register_lo(), acond);
1660   else
1661     __ csel(result->as_register(), opr1->as_register(), opr2->as_register(), acond);
1662 }
1663 
1664 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
1665   assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
1666 
1667   if (left->is_single_cpu()) {
1668     Register lreg = left->as_register();
1669     Register dreg = as_reg(dest);
1670 
1671     if (right->is_single_cpu()) {
1672       // cpu register - cpu register
1673 
1674       assert(left->type() == T_INT && right->type() == T_INT && dest->type() == T_INT,
1675              "should be");
1676       Register rreg = right->as_register();
1677       switch (code) {
1678       case lir_add: __ addw (dest->as_register(), lreg, rreg); break;
1679       case lir_sub: __ subw (dest->as_register(), lreg, rreg); break;
1680       case lir_mul: __ mulw (dest->as_register(), lreg, rreg); break;
1681       default:      ShouldNotReachHere();
1682       }
1683 
1684     } else if (right->is_double_cpu()) {
1685       Register rreg = right->as_register_lo();
1686       // single_cpu + double_cpu: can happen with obj+long
1687       assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
1688       switch (code) {
1689       case lir_add: __ add(dreg, lreg, rreg); break;
1690       case lir_sub: __ sub(dreg, lreg, rreg); break;
1691       default: ShouldNotReachHere();
1692       }
1693     } else if (right->is_constant()) {
1694       // cpu register - constant
1695       jlong c;
1696 
1697       // FIXME.  This is fugly: we really need to factor all this logic.
1698       switch(right->type()) {
1699       case T_LONG:
1700         c = right->as_constant_ptr()->as_jlong();
1701         break;
1702       case T_INT:
1703       case T_ADDRESS:
1704         c = right->as_constant_ptr()->as_jint();
1705         break;
1706       default:
1707         ShouldNotReachHere();
1708         c = 0;  // unreachable
1709         break;
1710       }
1711 
1712       assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
1713       if (c == 0 && dreg == lreg) {
1714         COMMENT("effective nop elided");
1715         return;
1716       }
1717       switch(left->type()) {
1718       case T_INT:
1719         switch (code) {
1720         case lir_add: __ addw(dreg, lreg, c); break;
1721         case lir_sub: __ subw(dreg, lreg, c); break;
1722         default: ShouldNotReachHere();
1723         }
1724         break;
1725       case T_OBJECT:
1726       case T_ADDRESS:
1727         switch (code) {
1728         case lir_add: __ add(dreg, lreg, c); break;
1729         case lir_sub: __ sub(dreg, lreg, c); break;
1730         default: ShouldNotReachHere();
1731         }
1732         break;
1733       default: ShouldNotReachHere();
1734       }
1735     } else {
1736       ShouldNotReachHere();
1737     }
1738 
1739   } else if (left->is_double_cpu()) {
1740     Register lreg_lo = left->as_register_lo();
1741 
1742     if (right->is_double_cpu()) {
1743       // cpu register - cpu register
1744       Register rreg_lo = right->as_register_lo();
1745       switch (code) {
1746       case lir_add: __ add (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1747       case lir_sub: __ sub (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1748       case lir_mul: __ mul (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1749       case lir_div: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, false, rscratch1); break;
1750       case lir_rem: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, true, rscratch1); break;
1751       default:
1752         ShouldNotReachHere();
1753       }
1754 
1755     } else if (right->is_constant()) {
1756       jlong c = right->as_constant_ptr()->as_jlong_bits();
1757       Register dreg = as_reg(dest);
1758       assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
1759       if (c == 0 && dreg == lreg_lo) {
1760         COMMENT("effective nop elided");
1761         return;
1762       }
1763       switch (code) {
1764         case lir_add: __ add(dreg, lreg_lo, c); break;
1765         case lir_sub: __ sub(dreg, lreg_lo, c); break;
1766         default:
1767           ShouldNotReachHere();
1768       }
1769     } else {
1770       ShouldNotReachHere();
1771     }
1772   } else if (left->is_single_fpu()) {
1773     assert(right->is_single_fpu(), "right hand side of float arithmetics needs to be float register");
1774     switch (code) {
1775     case lir_add: __ fadds (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1776     case lir_sub: __ fsubs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1777     case lir_mul: __ fmuls (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1778     case lir_div: __ fdivs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
1779     default:
1780       ShouldNotReachHere();
1781     }
1782   } else if (left->is_double_fpu()) {
1783     if (right->is_double_fpu()) {
1784       // cpu register - cpu register
1785       switch (code) {
1786       case lir_add: __ faddd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1787       case lir_sub: __ fsubd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1788       case lir_mul: __ fmuld (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1789       case lir_div: __ fdivd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
1790       default:
1791         ShouldNotReachHere();
1792       }
1793     } else {
1794       if (right->is_constant()) {
1795         ShouldNotReachHere();
1796       }
1797       ShouldNotReachHere();
1798     }
1799   } else if (left->is_single_stack() || left->is_address()) {
1800     assert(left == dest, "left and dest must be equal");
1801     ShouldNotReachHere();
1802   } else {
1803     ShouldNotReachHere();
1804   }
1805 }
1806 
1807 void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) { Unimplemented(); }
1808 
1809 
1810 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
1811   switch(code) {
1812   case lir_abs : __ fabsd(dest->as_double_reg(), value->as_double_reg()); break;
1813   case lir_sqrt: __ fsqrtd(dest->as_double_reg(), value->as_double_reg()); break;
1814   default      : ShouldNotReachHere();
1815   }
1816 }
1817 
1818 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1819 
1820   assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");
1821   Register Rleft = left->is_single_cpu() ? left->as_register() :
1822                                            left->as_register_lo();
1823    if (dst->is_single_cpu()) {
1824      Register Rdst = dst->as_register();
1825      if (right->is_constant()) {
1826        switch (code) {
1827          case lir_logic_and: __ andw (Rdst, Rleft, right->as_jint()); break;
1828          case lir_logic_or:  __ orrw (Rdst, Rleft, right->as_jint()); break;
1829          case lir_logic_xor: __ eorw (Rdst, Rleft, right->as_jint()); break;
1830          default: ShouldNotReachHere(); break;
1831        }
1832      } else {
1833        Register Rright = right->is_single_cpu() ? right->as_register() :
1834                                                   right->as_register_lo();
1835        switch (code) {
1836          case lir_logic_and: __ andw (Rdst, Rleft, Rright); break;
1837          case lir_logic_or:  __ orrw (Rdst, Rleft, Rright); break;
1838          case lir_logic_xor: __ eorw (Rdst, Rleft, Rright); break;
1839          default: ShouldNotReachHere(); break;
1840        }
1841      }
1842    } else {
1843      Register Rdst = dst->as_register_lo();
1844      if (right->is_constant()) {
1845        switch (code) {
1846          case lir_logic_and: __ andr (Rdst, Rleft, right->as_jlong()); break;
1847          case lir_logic_or:  __ orr (Rdst, Rleft, right->as_jlong()); break;
1848          case lir_logic_xor: __ eor (Rdst, Rleft, right->as_jlong()); break;
1849          default: ShouldNotReachHere(); break;
1850        }
1851      } else {
1852        Register Rright = right->is_single_cpu() ? right->as_register() :
1853                                                   right->as_register_lo();
1854        switch (code) {
1855          case lir_logic_and: __ andr (Rdst, Rleft, Rright); break;
1856          case lir_logic_or:  __ orr (Rdst, Rleft, Rright); break;
1857          case lir_logic_xor: __ eor (Rdst, Rleft, Rright); break;
1858          default: ShouldNotReachHere(); break;
1859        }
1860      }
1861    }
1862 }
1863 
1864 
1865 
1866 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { Unimplemented(); }
1867 
1868 
1869 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1870   if (opr1->is_constant() && opr2->is_single_cpu()) {
1871     // tableswitch
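         // A constant left operand is an index into the 'switches' descriptor
         // table, presumably filled in by the tableswitch peephole (see
         // peephole() below, currently compiled out).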
1872     Register reg = as_reg(opr2);
1873     struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
1874     __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
1875   } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
1876     Register reg1 = as_reg(opr1);
1877     if (opr2->is_single_cpu()) {
1878       // cpu register - cpu register
1879       Register reg2 = opr2->as_register();
1880       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
1881         __ cmpoop(reg1, reg2);
1882       } else {
1883         assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
1884         __ cmpw(reg1, reg2);
1885       }
1886       return;
1887     }
1888     if (opr2->is_double_cpu()) {
1889       // cpu register - cpu register
1890       Register reg2 = opr2->as_register_lo();
1891       __ cmp(reg1, reg2);
1892       return;
1893     }
1894 
1895     if (opr2->is_constant()) {
1896       bool is_32bit = false; // width of register operand
1897       jlong imm;
1898 
1899       switch(opr2->type()) {
1900       case T_INT:
1901         imm = opr2->as_constant_ptr()->as_jint();
1902         is_32bit = true;
1903         break;
1904       case T_LONG:
1905         imm = opr2->as_constant_ptr()->as_jlong();
1906         break;
1907       case T_ADDRESS:
1908         imm = opr2->as_constant_ptr()->as_jint();
1909         break;
1910       case T_OBJECT:
1911       case T_ARRAY:
1912         jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
1913         __ cmpoop(reg1, rscratch1);
1914         return;
1915       default:
1916         ShouldNotReachHere();
1917         imm = 0;  // unreachable
1918         break;
1919       }
1920 
1921       if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
1922         if (is_32bit)
1923           __ cmpw(reg1, imm);
1924         else
1925           __ cmp(reg1, imm);
1926         return;
1927       } else {
1928         __ mov(rscratch1, imm);
1929         if (is_32bit)
1930           __ cmpw(reg1, rscratch1);
1931         else
1932           __ cmp(reg1, rscratch1);
1933         return;
1934       }
1935     } else
1936       ShouldNotReachHere();
1937   } else if (opr1->is_single_fpu()) {
1938     FloatRegister reg1 = opr1->as_float_reg();
1939     assert(opr2->is_single_fpu(), "expect single float register");
1940     FloatRegister reg2 = opr2->as_float_reg();
1941     __ fcmps(reg1, reg2);
1942   } else if (opr1->is_double_fpu()) {
1943     FloatRegister reg1 = opr1->as_double_reg();
1944     assert(opr2->is_double_fpu(), "expect double float register");
1945     FloatRegister reg2 = opr2->as_double_reg();
1946     __ fcmpd(reg1, reg2);
1947   } else {
1948     ShouldNotReachHere();
1949   }
1950 }
1951 
1952 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
1953   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
1954     bool is_unordered_less = (code == lir_ucmp_fd2i);
1955     if (left->is_single_fpu()) {
1956       __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
1957     } else if (left->is_double_fpu()) {
1958       __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
1959     } else {
1960       ShouldNotReachHere();
1961     }
1962   } else if (code == lir_cmp_l2i) {
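         // Produce -1/0/1 for a long compare: start with -1 and keep it when
         // less; otherwise csinc yields 0 on equality and 1 when greater.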
1963     Label done;
1964     __ cmp(left->as_register_lo(), right->as_register_lo());
1965     __ mov(dst->as_register(), (u_int64_t)-1L);
1966     __ br(Assembler::LT, done);
1967     __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
1968     __ bind(done);
1969   } else {
1970     ShouldNotReachHere();
1971   }
1972 }
1973 
1974 
1975 void LIR_Assembler::align_call(LIR_Code code) {  }
1976 
1977 
1978 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
1979   address call = __ trampoline_call(Address(op->addr(), rtype));
1980   if (call == NULL) {
1981     bailout("trampoline stub overflow");
1982     return;
1983   }
1984   add_call_info(code_offset(), op->info());
1985 }
1986 
1987 
1988 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
1989   address call = __ ic_call(op->addr());
1990   if (call == NULL) {
1991     bailout("trampoline stub overflow");
1992     return;
1993   }
1994   add_call_info(code_offset(), op->info());
1995 }
1996 
1997 
1998 /* Currently, vtable-dispatch is only enabled for sparc platforms */
1999 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
2000   ShouldNotReachHere();
2001 }
2002 
2003 
2004 void LIR_Assembler::emit_static_call_stub() {
2005   address call_pc = __ pc();
2006   address stub = __ start_a_stub(call_stub_size());
2007   if (stub == NULL) {
2008     bailout("static call stub overflow");
2009     return;
2010   }
2011 
2012   int start = __ offset();
2013 
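       // Stub body: a metadata move and an absolute branch. Both the Method*
       // and the branch target are placeholders that get patched when the call
       // site is resolved.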
2014   __ relocate(static_stub_Relocation::spec(call_pc));
2015   __ mov_metadata(rmethod, (Metadata*)NULL);
2016   __ movptr(rscratch1, 0);
2017   __ br(rscratch1);
2018 
2019   assert(__ offset() - start <= call_stub_size(), "stub too big");
2020   __ end_a_stub();
2021 }
2022 
2023 
2024 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2025   assert(exceptionOop->as_register() == r0, "must match");
2026   assert(exceptionPC->as_register() == r3, "must match");
2027 
2028   // exception object is not added to oop map by LinearScan
2029   // (LinearScan assumes that no oops are in fixed registers)
2030   info->add_register_oop(exceptionOop);
2031   Runtime1::StubID unwind_id;
2032 
2033   // get current pc information
2034   // pc is only needed if the method has an exception handler, the unwind code does not need it.
2035   int pc_for_athrow_offset = __ offset();
2036   InternalAddress pc_for_athrow(__ pc());
2037   __ adr(exceptionPC->as_register(), pc_for_athrow);
2038   add_call_info(pc_for_athrow_offset, info); // for exception handler
2039 
2040   __ verify_not_null_oop(r0);
2041   // search an exception handler (r0: exception oop, r3: throwing pc)
2042   if (compilation()->has_fpu_code()) {
2043     unwind_id = Runtime1::handle_exception_id;
2044   } else {
2045     unwind_id = Runtime1::handle_exception_nofpu_id;
2046   }
2047   __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
2048 
2049   // FIXME: enough room for two byte trap   ????
2050   __ nop();
2051 }
2052 
2053 
2054 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
2055   assert(exceptionOop->as_register() == r0, "must match");
2056 
2057   __ b(_unwind_handler_entry);
2058 }
2059 
2060 
2061 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2062   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2063   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2064 
2065   switch (left->type()) {
2066     case T_INT:
2067       switch (code) {
2068       case lir_shl:  __ lslvw (dreg, lreg, count->as_register()); break;
2069       case lir_shr:  __ asrvw (dreg, lreg, count->as_register()); break;
2070       case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
2071       default:
2072         ShouldNotReachHere();
2073         break;
2074       }
2075       break;
2076     case T_LONG:
2077     case T_ADDRESS:
2078     case T_OBJECT:
2079       switch (code) {
2080       case lir_shl:  __ lslv (dreg, lreg, count->as_register()); break;
2081       case lir_shr:  __ asrv (dreg, lreg, count->as_register()); break;
2082       case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
2083       default:
2084         ShouldNotReachHere();
2085         break;
2086       }
2087       break;
2088     default:
2089       ShouldNotReachHere();
2090       break;
2092   }
2093 }
2094 
2095 
2096 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2097   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2098   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2099 
2100   switch (left->type()) {
2101     case T_INT:
2102       switch (code) {
2103       case lir_shl:  __ lslw (dreg, lreg, count); break;
2104       case lir_shr:  __ asrw (dreg, lreg, count); break;
2105       case lir_ushr: __ lsrw (dreg, lreg, count); break;
2106       default:
2107         ShouldNotReachHere();
2108         break;
2109       }
2110       break;
2111     case T_LONG:
2112     case T_ADDRESS:
2113     case T_OBJECT:
2114       switch (code) {
2115       case lir_shl:  __ lsl (dreg, lreg, count); break;
2116       case lir_shr:  __ asr (dreg, lreg, count); break;
2117       case lir_ushr: __ lsr (dreg, lreg, count); break;
2118       default:
2119         ShouldNotReachHere();
2120         break;
2121       }
2122       break;
2123     default:
2124       ShouldNotReachHere();
2125       break;
2127   }
2128 }
2129 
2130 
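     // store_parameter: spill an outgoing stub argument into the reserved
     // argument area at the bottom of the frame (the offset is in words from SP).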
2131 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2132   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2133   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2134   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2135   __ str (r, Address(sp, offset_from_rsp_in_bytes));
2136 }
2137 
2138 
2139 void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2140   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2141   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2142   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2143   __ mov (rscratch1, c);
2144   __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2145 }
2146 
2147 
2148 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
2149   ShouldNotReachHere();
2150   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2151   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2152   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2153   __ lea(rscratch1, __ constant_oop_address(o));
2154   __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2155 }
2156 
2157 
2158 // This code replaces a call to arraycopy; no exception may be thrown in
2159 // this code: exceptions must be raised in the System.arraycopy activation
2160 // frame. We could save some checks if this were not the case.
2161 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2162   ciArrayKlass* default_type = op->expected_type();
2163   Register src = op->src()->as_register();
2164   Register dst = op->dst()->as_register();
2165   Register src_pos = op->src_pos()->as_register();
2166   Register dst_pos = op->dst_pos()->as_register();
2167   Register length  = op->length()->as_register();
2168   Register tmp = op->tmp()->as_register();
2169 
2170   CodeStub* stub = op->stub();
2171   int flags = op->flags();
2172   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2173   if (basic_type == T_ARRAY) basic_type = T_OBJECT;
2174 
2175   // if we don't know anything, just go through the generic arraycopy
2176   if (default_type == NULL // || basic_type == T_OBJECT
2177       ) {
2178     Label done;
2179     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2180 
2181     // Save the arguments in case the generic arraycopy fails and we
2182     // have to fall back to the JNI stub
2183     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2184     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2185     __ str(src,              Address(sp, 4*BytesPerWord));
2186 
2187     address copyfunc_addr = StubRoutines::generic_arraycopy();
2188     assert(copyfunc_addr != NULL, "generic arraycopy stub required");
2189 
2190     // The arguments are in java calling convention so we shift them
2191     // to C convention
2192     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2193     __ mov(c_rarg0, j_rarg0);
2194     assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2195     __ mov(c_rarg1, j_rarg1);
2196     assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
2197     __ mov(c_rarg2, j_rarg2);
2198     assert_different_registers(c_rarg3, j_rarg4);
2199     __ mov(c_rarg3, j_rarg3);
2200     __ mov(c_rarg4, j_rarg4);
2201 #ifndef PRODUCT
2202     if (PrintC1Statistics) {
2203       __ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
2204     }
2205 #endif
2206     __ far_call(RuntimeAddress(copyfunc_addr));
2207 
2208     __ cbz(r0, *stub->continuation());
2209 
2210     // Reload values from the stack so they are where the stub
2211     // expects them.
2212     __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2213     __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2214     __ ldr(src,              Address(sp, 4*BytesPerWord));
2215 
2216     // r0 is -1^K where K == partial copied count
2217     __ eonw(rscratch1, r0, zr);
2218     // adjust length down and src/end pos up by partial copied count
2219     __ subw(length, length, rscratch1);
2220     __ addw(src_pos, src_pos, rscratch1);
2221     __ addw(dst_pos, dst_pos, rscratch1);
2222     __ b(*stub->entry());
2223 
2224     __ bind(*stub->continuation());
2225     return;
2226   }
2227 
2228   assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2229 
2230   int elem_size = type2aelembytes(basic_type);
2231   int shift_amount;
2232   int scale = exact_log2(elem_size);
2233 
2234   Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2235   Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2236   Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
2237   Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
2238 
2239   // test for NULL
2240   if (flags & LIR_OpArrayCopy::src_null_check) {
2241     __ cbz(src, *stub->entry());
2242   }
2243   if (flags & LIR_OpArrayCopy::dst_null_check) {
2244     __ cbz(dst, *stub->entry());
2245   }
2246 
2247   // If the compiler was not able to prove that exact type of the source or the destination
2248   // of the arraycopy is an array type, check at runtime if the source or the destination is
2249   // an instance type.
2250   if (flags & LIR_OpArrayCopy::type_check) {
2251     if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2252       __ load_klass(tmp, dst);
2253       __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2254       __ cmpw(rscratch1, Klass::_lh_neutral_value);
2255       __ br(Assembler::GE, *stub->entry());
2256     }
2257 
2258     if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2259       __ load_klass(tmp, src);
2260       __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2261       __ cmpw(rscratch1, Klass::_lh_neutral_value);
2262       __ br(Assembler::GE, *stub->entry());
2263     }
2264   }
2265 
2266   // check if negative
2267   if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
2268     __ cmpw(src_pos, 0);
2269     __ br(Assembler::LT, *stub->entry());
2270   }
2271   if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
2272     __ cmpw(dst_pos, 0);
2273     __ br(Assembler::LT, *stub->entry());
2274   }
2275 
2276   if (flags & LIR_OpArrayCopy::length_positive_check) {
2277     __ cmpw(length, 0);
2278     __ br(Assembler::LT, *stub->entry());
2279   }
2280 
2281   if (flags & LIR_OpArrayCopy::src_range_check) {
2282     __ addw(tmp, src_pos, length);
2283     __ ldrw(rscratch1, src_length_addr);
2284     __ cmpw(tmp, rscratch1);
2285     __ br(Assembler::HI, *stub->entry());
2286   }
2287   if (flags & LIR_OpArrayCopy::dst_range_check) {
2288     __ addw(tmp, dst_pos, length);
2289     __ ldrw(rscratch1, dst_length_addr);
2290     __ cmpw(tmp, rscratch1);
2291     __ br(Assembler::HI, *stub->entry());
2292   }
2293 
2294   if (flags & LIR_OpArrayCopy::type_check) {
2295     // We don't know the array types are compatible
2296     if (basic_type != T_OBJECT) {
2297       // Simple test for basic type arrays
2298       if (UseCompressedClassPointers) {
2299         __ ldrw(tmp, src_klass_addr);
2300         __ ldrw(rscratch1, dst_klass_addr);
2301         __ cmpw(tmp, rscratch1);
2302       } else {
2303         __ ldr(tmp, src_klass_addr);
2304         __ ldr(rscratch1, dst_klass_addr);
2305         __ cmp(tmp, rscratch1);
2306       }
2307       __ br(Assembler::NE, *stub->entry());
2308     } else {
2309       // For object arrays, if src is a sub class of dst then we can
2310       // safely do the copy.
2311       Label cont, slow;
2312 
2313 #define PUSH(r1, r2)                                    \
2314       stp(r1, r2, __ pre(sp, -2 * wordSize));
2315 
2316 #define POP(r1, r2)                                     \
2317       ldp(r1, r2, __ post(sp, 2 * wordSize));
2318 
2319       __ PUSH(src, dst);
2320 
2321       __ load_klass(src, src);
2322       __ load_klass(dst, dst);
2323 
2324       __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
2325 
2326       __ PUSH(src, dst);
2327       __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
2328       __ POP(src, dst);
2329 
2330       __ cbnz(src, cont);
2331 
2332       __ bind(slow);
2333       __ POP(src, dst);
2334 
2335       address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2336       if (copyfunc_addr != NULL) { // use stub if available
2337         // src is not a sub class of dst so we have to do a
2338         // per-element check.
2339 
2340         int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2341         if ((flags & mask) != mask) {
2342           // Check that the operand not statically known to be an object array really is one.
2343           assert(flags & mask, "one of the two should be known to be an object array");
2344 
2345           if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2346             __ load_klass(tmp, src);
2347           } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2348             __ load_klass(tmp, dst);
2349           }
2350           int lh_offset = in_bytes(Klass::layout_helper_offset());
2351           Address klass_lh_addr(tmp, lh_offset);
2352           jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2353           __ ldrw(rscratch1, klass_lh_addr);
2354           __ mov(rscratch2, objArray_lh);
2355           __ eorw(rscratch1, rscratch1, rscratch2);
2356           __ cbnzw(rscratch1, *stub->entry());
2357         }
2358 
2359        // Spill because stubs can use any register they like and it's
2360        // easier to restore just those that we care about.
2361         __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2362         __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2363         __ str(src,              Address(sp, 4*BytesPerWord));
2364 
2365         __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2366         __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2367         assert_different_registers(c_rarg0, dst, dst_pos, length);
2368         __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2369         __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
2370         assert_different_registers(c_rarg1, dst, length);
2371         __ uxtw(c_rarg2, length);
2372         assert_different_registers(c_rarg2, dst);
2373 
2374         __ load_klass(c_rarg4, dst);
2375         __ ldr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
2376         __ ldrw(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
2377         __ far_call(RuntimeAddress(copyfunc_addr));
2378 
2379 #ifndef PRODUCT
2380         if (PrintC1Statistics) {
2381           Label failed;
2382           __ cbnz(r0, failed);
2383           __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
2384           __ bind(failed);
2385         }
2386 #endif
2387 
2388         __ cbz(r0, *stub->continuation());
2389 
2390 #ifndef PRODUCT
2391         if (PrintC1Statistics) {
2392           __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
2393         }
2394 #endif
2395         assert_different_registers(dst, dst_pos, length, src_pos, src, r0, rscratch1);
2396 
2397         // Restore previously spilled arguments
2398         __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2399         __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2400         __ ldr(src,              Address(sp, 4*BytesPerWord));
2401 
2402         // return value is -1^K where K is partial copied count
2403         __ eonw(rscratch1, r0, zr);
2404         // adjust length down and src/end pos up by partial copied count
2405         __ subw(length, length, rscratch1);
2406         __ addw(src_pos, src_pos, rscratch1);
2407         __ addw(dst_pos, dst_pos, rscratch1);
2408       }
2409 
2410       __ b(*stub->entry());
2411 
2412       __ bind(cont);
2413       __ POP(src, dst);
2414     }
2415   }
2416 
2417 #ifdef ASSERT
2418   if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2419     // Sanity check the known type with the incoming class.  For the
2420     // primitive case the types must match exactly with src.klass and
2421     // dst.klass each exactly matching the default type.  For the
2422     // object array case, if no type check is needed then either the
2423     // dst type is exactly the expected type and the src type is a
2424     // subtype which we can't check or src is the same array as dst
2425     // but not necessarily exactly of type default_type.
2426     Label known_ok, halt;
2427     __ mov_metadata(tmp, default_type->constant_encoding());
2428     if (UseCompressedClassPointers) {
2429       __ encode_klass_not_null(tmp);
2430     }
2431 
2432     if (basic_type != T_OBJECT) {
2433 
2434       if (UseCompressedClassPointers) {
2435         __ ldrw(rscratch1, dst_klass_addr);
2436         __ cmpw(tmp, rscratch1);
2437       } else {
2438         __ ldr(rscratch1, dst_klass_addr);
2439         __ cmp(tmp, rscratch1);
2440       }
2441       __ br(Assembler::NE, halt);
2442       if (UseCompressedClassPointers) {
2443         __ ldrw(rscratch1, src_klass_addr);
2444         __ cmpw(tmp, rscratch1);
2445       } else {
2446         __ ldr(rscratch1, src_klass_addr);
2447         __ cmp(tmp, rscratch1);
2448       }
2449       __ br(Assembler::EQ, known_ok);
2450     } else {
2451       if (UseCompressedClassPointers) {
2452         __ ldrw(rscratch1, dst_klass_addr);
2453         __ cmpw(tmp, rscratch1);
2454       } else {
2455         __ ldr(rscratch1, dst_klass_addr);
2456         __ cmp(tmp, rscratch1);
2457       }
2458       __ br(Assembler::EQ, known_ok);
2459       __ cmp(src, dst);
2460       __ br(Assembler::EQ, known_ok);
2461     }
2462     __ bind(halt);
2463     __ stop("incorrect type information in arraycopy");
2464     __ bind(known_ok);
2465   }
2466 #endif
2467 
2468 #ifndef PRODUCT
2469   if (PrintC1Statistics) {
2470     __ incrementw(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
2471   }
2472 #endif
2473 
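       // All checks have passed (or were skipped): compute the raw source and
       // destination element addresses plus the element count, then dispatch to
       // the most specific arraycopy stub available.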
2474   __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2475   __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2476   assert_different_registers(c_rarg0, dst, dst_pos, length);
2477   __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2478   __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
2479   assert_different_registers(c_rarg1, dst, length);
2480   __ uxtw(c_rarg2, length);
2481   assert_different_registers(c_rarg2, dst);
2482 
2483   bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2484   bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
2485   const char *name;
2486   address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2487 
2488   CodeBlob *cb = CodeCache::find_blob(entry);
2489   if (cb) {
2490     __ far_call(RuntimeAddress(entry));
2491   } else {
2492     __ call_VM_leaf(entry, 3);
2493   }
2494 
2495   __ bind(*stub->continuation());
2496 }
2497 
2498 
2499 
2500 
2501 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2502   Register obj = op->obj_opr()->as_register();  // may not be an oop
2503   Register hdr = op->hdr_opr()->as_register();
2504   Register lock = op->lock_opr()->as_register();
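       // Fast-path locking: lock_object stores the displaced mark word into the
       // on-stack BasicLock and CASes the object header, branching to the
       // slow-path stub on contention.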
2505   if (!UseFastLocking) {
2506     __ b(*op->stub()->entry());
2507   } else if (op->code() == lir_lock) {
2508     Register scratch = noreg;
2509     if (UseBiasedLocking) {
2510       scratch = op->scratch_opr()->as_register();
2511     }
2512     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2513     // add debug info for NullPointerException only if one is possible
2514     int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
2515     if (op->info() != NULL) {
2516       add_debug_info_for_null_check(null_check_offset, op->info());
2517     }
2518     // done
2519   } else if (op->code() == lir_unlock) {
2520     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2521     __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2522   } else {
2523     Unimplemented();
2524   }
2525   __ bind(*op->stub()->continuation());
2526 }
2527 
2528 
2529 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2530   ciMethod* method = op->profiled_method();
2531   int bci          = op->profiled_bci();
2532   ciMethod* callee = op->profiled_callee();
2533 
2534   // Update counter for all call types
2535   ciMethodData* md = method->method_data_or_null();
2536   assert(md != NULL, "Sanity");
2537   ciProfileData* data = md->bci_to_data(bci);
2538   assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
2539   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
2540   Register mdo  = op->mdo()->as_register();
2541   __ mov_metadata(mdo, md->constant_encoding());
2542   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
2543   // Perform additional virtual call profiling for invokevirtual and
2544   // invokeinterface bytecodes
2545   if (op->should_profile_receiver_type()) {
2546     assert(op->recv()->is_single_cpu(), "recv must be allocated");
2547     Register recv = op->recv()->as_register();
2548     assert_different_registers(mdo, recv);
2549     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
2550     ciKlass* known_klass = op->known_holder();
2551     if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
2552       // We know the type that will be seen at this call site; we can
2553       // statically update the MethodData* rather than needing to do
2554       // dynamic tests on the receiver type
2555 
2556       // NOTE: we should probably put a lock around this search to
2557       // avoid collisions by concurrent compilations
2558       ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
2559       uint i;
2560       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2561         ciKlass* receiver = vc_data->receiver(i);
2562         if (known_klass->equals(receiver)) {
2563           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
2564           __ addptr(data_addr, DataLayout::counter_increment);
2565           return;
2566         }
2567       }
2568 
2569       // Receiver type not found in profile data; select an empty slot
2570 
2571       // Note that this is less efficient than it should be because it
2572       // always does a write to the receiver part of the
2573       // VirtualCallData rather than just the first time
2574       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2575         ciKlass* receiver = vc_data->receiver(i);
2576         if (receiver == NULL) {
2577           Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
2578           __ mov_metadata(rscratch1, known_klass->constant_encoding());
2579           __ lea(rscratch2, recv_addr);
2580           __ str(rscratch1, Address(rscratch2));
2581           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
2582           __ addptr(data_addr, DataLayout::counter_increment);
2583           return;
2584         }
2585       }
2586     } else {
2587       __ load_klass(recv, recv);
2588       Label update_done;
2589       type_profile_helper(mdo, md, data, recv, &update_done);
2590       // Receiver did not match any saved receiver and there is no empty row for it.
2591       // Increment total counter to indicate polymorphic case.
2592       __ addptr(counter_addr, DataLayout::counter_increment);
2593 
2594       __ bind(update_done);
2595     }
2596   } else {
2597     // Static call
2598     __ addptr(counter_addr, DataLayout::counter_increment);
2599   }
2600 }
2601 
2602 
2603 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
2604   Unimplemented();
2605 }
2606 
2607 
2608 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
2609   __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
2610 }
2611 
2612 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
2613   assert(op->crc()->is_single_cpu(),  "crc must be register");
2614   assert(op->val()->is_single_cpu(),  "byte value must be register");
2615   assert(op->result_opr()->is_single_cpu(), "result must be register");
2616   Register crc = op->crc()->as_register();
2617   Register val = op->val()->as_register();
2618   Register res = op->result_opr()->as_register();
2619 
2620   assert_different_registers(val, crc, res);
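       // CRC-32 keeps the running value bit-inverted: invert crc, fold in one
       // byte via the table at StubRoutines::crc_table_addr(), then invert back.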
2621   unsigned long offset;
2622   __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);
2623   if (offset) __ add(res, res, offset);
2624 
2625   __ mvnw(crc, crc); // ~crc
2626   __ update_byte_crc32(crc, val, res);
2627   __ mvnw(res, crc); // ~crc
2628 }
2629 
2630 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
2631   COMMENT("emit_profile_type {");
2632   Register obj = op->obj()->as_register();
2633   Register tmp = op->tmp()->as_pointer_register();
2634   Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
2635   ciKlass* exact_klass = op->exact_klass();
2636   intptr_t current_klass = op->current_klass();
2637   bool not_null = op->not_null();
2638   bool no_conflict = op->no_conflict();
2639 
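       // A type-profile cell packs a Klass* together with status bits in its low
       // bits (TypeEntries::null_seen, type_unknown). The code below records a
       // null, installs the first klass seen, or degrades the cell to 'unknown'
       // when it conflicts with what was recorded before.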
2640   Label update, next, none;
2641 
2642   bool do_null = !not_null;
2643   bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
2644   bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
2645 
2646   assert(do_null || do_update, "why are we here?");
2647   assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
2648   assert(mdo_addr.base() != rscratch1, "wrong register");
2649 
2650   __ verify_oop(obj);
2651 
2652   if (tmp != obj) {
2653     __ mov(tmp, obj);
2654   }
2655   if (do_null) {
2656     __ cbnz(tmp, update);
2657     if (!TypeEntries::was_null_seen(current_klass)) {
2658       __ ldr(rscratch2, mdo_addr);
2659       __ orr(rscratch2, rscratch2, TypeEntries::null_seen);
2660       __ str(rscratch2, mdo_addr);
2661     }
2662     if (do_update) {
2663 #ifndef ASSERT
2664       __ b(next);
2665     }
2666 #else
2667       __ b(next);
2668     }
2669   } else {
2670     __ cbnz(tmp, update);
2671     __ stop("unexpected null obj");
2672 #endif
2673   }
2674 
2675   __ bind(update);
2676 
2677   if (do_update) {
2678 #ifdef ASSERT
2679     if (exact_klass != NULL) {
2680       Label ok;
2681       __ load_klass(tmp, tmp);
2682       __ mov_metadata(rscratch1, exact_klass->constant_encoding());
2683       __ eor(rscratch1, tmp, rscratch1);
2684       __ cbz(rscratch1, ok);
2685       __ stop("exact klass and actual klass differ");
2686       __ bind(ok);
2687     }
2688 #endif
2689     if (!no_conflict) {
2690       if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
2691         if (exact_klass != NULL) {
2692           __ mov_metadata(tmp, exact_klass->constant_encoding());
2693         } else {
2694           __ load_klass(tmp, tmp);
2695         }
2696 
2697         __ ldr(rscratch2, mdo_addr);
2698         __ eor(tmp, tmp, rscratch2);
2699         __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2700         // klass seen before, nothing to do. The unknown bit may have been
2701         // set already but no need to check.
2702         __ cbz(rscratch1, next);
2703 
2704         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2705 
2706         if (TypeEntries::is_type_none(current_klass)) {
2707           __ cbz(rscratch2, none);
2708           __ cmp(rscratch2, TypeEntries::null_seen);
2709           __ br(Assembler::EQ, none);
2710           // There is a chance that the checks above (re-reading profiling
2711           // data from memory) fail if another thread has just set the
2712           // profiling to this obj's klass
2713           __ dmb(Assembler::ISHLD);
2714           __ ldr(rscratch2, mdo_addr);
2715           __ eor(tmp, tmp, rscratch2);
2716           __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2717           __ cbz(rscratch1, next);
2718         }
2719       } else {
2720         assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
2721                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
2722 
2723         __ ldr(tmp, mdo_addr);
2724         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2725       }
2726 
2727       // different than before. Cannot keep accurate profile.
2728       __ ldr(rscratch2, mdo_addr);
2729       __ orr(rscratch2, rscratch2, TypeEntries::type_unknown);
2730       __ str(rscratch2, mdo_addr);
2731 
2732       if (TypeEntries::is_type_none(current_klass)) {
2733         __ b(next);
2734 
2735         __ bind(none);
2736         // first time here. Set profile type.
2737         __ str(tmp, mdo_addr);
2738       }
2739     } else {
2740       // There's a single possible klass at this profile point
2741       assert(exact_klass != NULL, "should be");
2742       if (TypeEntries::is_type_none(current_klass)) {
2743         __ mov_metadata(tmp, exact_klass->constant_encoding());
2744         __ ldr(rscratch2, mdo_addr);
2745         __ eor(tmp, tmp, rscratch2);
2746         __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
2747         __ cbz(rscratch1, next);
2748 #ifdef ASSERT
2749         {
2750           Label ok;
2751           __ ldr(rscratch1, mdo_addr);
2752           __ cbz(rscratch1, ok);
2753           __ cmp(rscratch1, TypeEntries::null_seen);
2754           __ br(Assembler::EQ, ok);
2755           // may have been set by another thread
2756           __ dmb(Assembler::ISHLD);
2757           __ mov_metadata(rscratch1, exact_klass->constant_encoding());
2758           __ ldr(rscratch2, mdo_addr);
2759           __ eor(rscratch2, rscratch1, rscratch2);
2760           __ andr(rscratch2, rscratch2, TypeEntries::type_mask);
2761           __ cbz(rscratch2, ok);
2762 
2763           __ stop("unexpected profiling mismatch");
2764           __ bind(ok);
2765         }
2766 #endif
2767         // first time here. Set profile type.
2768         __ str(tmp, mdo_addr);
2769       } else {
2770         assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
2771                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2772 
2773         __ ldr(tmp, mdo_addr);
2774         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2775 
2776         __ orr(tmp, tmp, TypeEntries::type_unknown);
2777         __ str(tmp, mdo_addr);
2778         // FIXME: Write barrier needed here?
2779       }
2780     }
2781 
2782     __ bind(next);
2783   }
2784   COMMENT("} emit_profile_type");
2785 }
2786 
2787 
2788 void LIR_Assembler::align_backward_branch_target() {
2789 }
2790 
2791 
2792 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2793   // tmp must be unused
2794   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2795 
2796   if (left->is_single_cpu()) {
2797     assert(dest->is_single_cpu(), "expect single result reg");
2798     __ negw(dest->as_register(), left->as_register());
2799   } else if (left->is_double_cpu()) {
2800     assert(dest->is_double_cpu(), "expect double result reg");
2801     __ neg(dest->as_register_lo(), left->as_register_lo());
2802   } else if (left->is_single_fpu()) {
2803     assert(dest->is_single_fpu(), "expect single float result reg");
2804     __ fnegs(dest->as_float_reg(), left->as_float_reg());
2805   } else {
2806     assert(left->is_double_fpu(), "expect double float operand reg");
2807     assert(dest->is_double_fpu(), "expect double float result reg");
2808     __ fnegd(dest->as_double_reg(), left->as_double_reg());
2809   }
2810 }
2811 
2812 
2813 void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
2814   assert(patch_code == lir_patch_none, "Patch code not supported");
2815   __ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr()));
2816 }
2817 
2818 
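     // Leaf call into the runtime: targets inside the code cache are reached
     // with a pc-relative far_call; anything else goes through blrt, which is
     // given the integer/FP argument counts and the result kind it needs.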
2819 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
2820   assert(!tmp->is_valid(), "don't need temporary");
2821 
2822   CodeBlob *cb = CodeCache::find_blob(dest);
2823   if (cb) {
2824     __ far_call(RuntimeAddress(dest));
2825   } else {
2826     __ mov(rscratch1, RuntimeAddress(dest));
2827     int len = args->length();
2828     int type = 0;
2829     if (! result->is_illegal()) {
2830       switch (result->type()) {
2831       case T_VOID:
2832         type = 0;
2833         break;
2834       case T_INT:
2835       case T_LONG:
2836       case T_OBJECT:
2837         type = 1;
2838         break;
2839       case T_FLOAT:
2840         type = 2;
2841         break;
2842       case T_DOUBLE:
2843         type = 3;
2844         break;
2845       default:
2846         ShouldNotReachHere();
2847         break;
2848       }
2849     }
2850     int num_gpargs = 0;
2851     int num_fpargs = 0;
2852     for (int i = 0; i < args->length(); i++) {
2853       LIR_Opr arg = args->at(i);
2854       if (arg->type() == T_FLOAT || arg->type() == T_DOUBLE) {
2855         num_fpargs++;
2856       } else {
2857         num_gpargs++;
2858       }
2859     }
2860     __ blrt(rscratch1, num_gpargs, num_fpargs, type);
2861   }
2862 
2863   if (info != NULL) {
2864     add_call_info_here(info);
2865   }
2866   __ maybe_isb();
2867 }
2868 
2869 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
2870   if (dest->is_address() || src->is_address()) {
2871     move_op(src, dest, type, lir_patch_none, info,
2872             /*pop_fpu_stack*/false, /*unaligned*/false, /*wide*/false);
2873   } else {
2874     ShouldNotReachHere();
2875   }
2876 }
2877 
2878 #ifdef ASSERT
2879 // emit run-time assertion
2880 void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
2881   assert(op->code() == lir_assert, "must be");
2882 
2883   if (op->in_opr1()->is_valid()) {
2884     assert(op->in_opr2()->is_valid(), "both operands must be valid");
2885     comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
2886   } else {
2887     assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
2888     assert(op->condition() == lir_cond_always, "no other conditions allowed");
2889   }
2890 
2891   Label ok;
2892   if (op->condition() != lir_cond_always) {
2893     Assembler::Condition acond = Assembler::AL;
2894     switch (op->condition()) {
2895       case lir_cond_equal:        acond = Assembler::EQ;  break;
2896       case lir_cond_notEqual:     acond = Assembler::NE;  break;
2897       case lir_cond_less:         acond = Assembler::LT;  break;
2898       case lir_cond_lessEqual:    acond = Assembler::LE;  break;
2899       case lir_cond_greaterEqual: acond = Assembler::GE;  break;
2900       case lir_cond_greater:      acond = Assembler::GT;  break;
2901       case lir_cond_belowEqual:   acond = Assembler::LS;  break;
2902       case lir_cond_aboveEqual:   acond = Assembler::HS;  break;
2903       default:                    ShouldNotReachHere();
2904     }
2905     __ br(acond, ok);
2906   }
2907   if (op->halt()) {
2908     const char* str = __ code_string(op->msg());
2909     __ stop(str);
2910   } else {
2911     breakpoint();
2912   }
2913   __ bind(ok);
2914 }
2915 #endif
2916 
2917 #ifndef PRODUCT
2918 #define COMMENT(x)   do { __ block_comment(x); } while (0)
2919 #else
2920 #define COMMENT(x)
2921 #endif
2922 
2923 void LIR_Assembler::membar() {
2924   COMMENT("membar");
2925   __ membar(MacroAssembler::AnyAny);
2926 }
2927 
2928 void LIR_Assembler::membar_acquire() {
2929   __ membar(Assembler::LoadLoad|Assembler::LoadStore);
2930 }
2931 
2932 void LIR_Assembler::membar_release() {
2933   __ membar(Assembler::LoadStore|Assembler::StoreStore);
2934 }
2935 
2936 void LIR_Assembler::membar_loadload() {
2937   __ membar(Assembler::LoadLoad);
2938 }
2939 
2940 void LIR_Assembler::membar_storestore() {
2941   __ membar(MacroAssembler::StoreStore);
2942 }
2943 
2944 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
2945 
2946 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
2947 
2948 void LIR_Assembler::on_spin_wait() {
2949   Unimplemented();
2950 }
2951 
2952 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
2953   __ mov(result_reg->as_register(), rthread);
2954 }
2955 
2956 
2957 void LIR_Assembler::peephole(LIR_List *lir) {
2958 #if 0
2959   if (tableswitch_count >= max_tableswitches)
2960     return;
2961 
2962   /*
2963     This finite-state automaton recognizes sequences of compare-and-
2964     branch instructions.  We will turn them into a tableswitch.  You
2965     could argue that C1 really shouldn't be doing this sort of
2966     optimization, but without it the code is really horrible.
2967   */
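       //
       // The matched LIR sequence, for consecutive keys k, k+1, ..., is:
       //
       //   cmp   reg, k      branch equal  -> L_k
       //   cmp   reg, k+1    branch equal  -> L_{k+1}
       //   ...
       //
       // Sufficiently long runs are rewritten into a branch table below.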
2968 
2969   enum { start_s, cmp1_s, beq_s, cmp_s } state;
2970   int first_key, last_key = -2147483648;
2971   int next_key = 0;
2972   int start_insn = -1;
2973   int last_insn = -1;
2974   Register reg = noreg;
2975   LIR_Opr reg_opr;
2976   state = start_s;
2977 
2978   LIR_OpList* inst = lir->instructions_list();
2979   for (int i = 0; i < inst->length(); i++) {
2980     LIR_Op* op = inst->at(i);
2981     switch (state) {
2982     case start_s:
2983       first_key = -1;
2984       start_insn = i;
2985       switch (op->code()) {
2986       case lir_cmp:
2987         LIR_Opr opr1 = op->as_Op2()->in_opr1();
2988         LIR_Opr opr2 = op->as_Op2()->in_opr2();
2989         if (opr1->is_cpu_register() && opr1->is_single_cpu()
2990             && opr2->is_constant()
2991             && opr2->type() == T_INT) {
2992           reg_opr = opr1;
2993           reg = opr1->as_register();
2994           first_key = opr2->as_constant_ptr()->as_jint();
2995           next_key = first_key + 1;
2996           state = cmp_s;
2997           goto next_state;
2998         }
2999         break;
3000       }
3001       break;
3002     case cmp_s:
3003       switch (op->code()) {
3004       case lir_branch:
3005         if (op->as_OpBranch()->cond() == lir_cond_equal) {
3006           state = beq_s;
3007           last_insn = i;
3008           goto next_state;
3009         }
3010       }
3011       state = start_s;
3012       break;
3013     case beq_s:
3014       switch (op->code()) {
3015       case lir_cmp: {
3016         LIR_Opr opr1 = op->as_Op2()->in_opr1();
3017         LIR_Opr opr2 = op->as_Op2()->in_opr2();
3018         if (opr1->is_cpu_register() && opr1->is_single_cpu()
3019             && opr1->as_register() == reg
3020             && opr2->is_constant()
3021             && opr2->type() == T_INT
3022             && opr2->as_constant_ptr()->as_jint() == next_key) {
3023           last_key = next_key;
3024           next_key++;
3025           state = cmp_s;
3026           goto next_state;
3027         }
3028       }
3029       }
3030       last_key = next_key;
3031       state = start_s;
3032       break;
3033     default:
3034       assert(false, "impossible state");
3035     }
3036     if (state == start_s) {
3037       if (first_key < last_key - 5L && reg != noreg) {
3038         {
3039           // printf("found run register %d starting at insn %d low value %d high value %d\n",
3040           //        reg->encoding(),
3041           //        start_insn, first_key, last_key);
3042           //   for (int i = 0; i < inst->length(); i++) {
3043           //     inst->at(i)->print();
3044           //     tty->print("\n");
3045           //   }
3046           //   tty->print("\n");
3047         }
3048 
3049         struct tableswitch *sw = &switches[tableswitch_count];
3050         sw->_insn_index = start_insn;  sw->_first_key = first_key;
3051         sw->_last_key = last_key;  sw->_reg = reg;
3052         inst->insert_before(last_insn + 1, new LIR_OpLabel(&sw->_after));
3053         {
3054           // Insert the new table of branches
3055           int offset = last_insn;
3056           for (int n = first_key; n < last_key; n++) {
3057             inst->insert_before
3058               (last_insn + 1,
3059                new LIR_OpBranch(lir_cond_always, T_ILLEGAL,
3060                                 inst->at(offset)->as_OpBranch()->label()));
3061             offset -= 2;  i++;
3062           }
3063         }
3064         // Delete all the old compare-and-branch instructions
3065         for (int n = first_key; n < last_key; n++) {
3066           inst->remove_at(start_insn);
3067           inst->remove_at(start_insn);
3068         }
3069         // Insert the tableswitch instruction
3070         inst->insert_before(start_insn,
3071                             new LIR_Op2(lir_cmp, lir_cond_always,
3072                                         LIR_OprFact::intConst(tableswitch_count),
3073                                         reg_opr));
3074         inst->insert_before(start_insn + 1, new LIR_OpLabel(&sw->_branches));
3075         tableswitch_count++;
3076       }
3077       reg = noreg;
3078       last_key = -2147483648;
3079     }
3080   next_state:
3081     ;
3082   }
3083 #endif
3084 }
3085 
3086 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
3087   Address addr = as_Address(src->as_address_ptr());
3088   BasicType type = src->type();
3089   bool is_oop = type == T_OBJECT || type == T_ARRAY;
3090 
3091   void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
3092   void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);
3093 
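       // Select the MacroAssembler atomic add/exchange primitive matching
       // the operand width: the "w" variants operate on 32-bit values,
       // which is also what compressed oops require.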
3094   switch(type) {
3095   case T_INT:
3096     xchg = &MacroAssembler::atomic_xchgalw;
3097     add = &MacroAssembler::atomic_addalw;
3098     break;
3099   case T_LONG:
3100     xchg = &MacroAssembler::atomic_xchgal;
3101     add = &MacroAssembler::atomic_addal;
3102     break;
3103   case T_OBJECT:
3104   case T_ARRAY:
3105     if (UseCompressedOops) {
3106       xchg = &MacroAssembler::atomic_xchgalw;
3107       add = &MacroAssembler::atomic_addalw;
3108     } else {
3109       xchg = &MacroAssembler::atomic_xchgal;
3110       add = &MacroAssembler::atomic_addal;
3111     }
3112     break;
3113   default:
3114     ShouldNotReachHere();
3115     xchg = &MacroAssembler::atomic_xchgal;
3116     add = &MacroAssembler::atomic_addal; // unreachable
3117   }
3118 
3119   switch (code) {
3120   case lir_xadd:
3121     {
3122       RegisterOrConstant inc;
3123       Register tmp = as_reg(tmp_op);
3124       Register dst = as_reg(dest);
3125       if (data->is_constant()) {
3126         inc = RegisterOrConstant(as_long(data));
3127         assert_different_registers(dst, addr.base(), tmp,
3128                                    rscratch1, rscratch2);
3129       } else {
3130         inc = RegisterOrConstant(as_reg(data));
3131         assert_different_registers(inc.as_register(), dst, addr.base(), tmp,
3132                                    rscratch1, rscratch2);
3133       }
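           // Form the address in tmp and perform the atomic add; dst
           // receives the previous value at the location.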
3134       __ lea(tmp, addr);
3135       (_masm->*add)(dst, inc, tmp);
3136       break;
3137     }
3138   case lir_xchg:
3139     {
3140       Register tmp = tmp_op->as_register();
3141       Register obj = as_reg(data);
3142       Register dst = as_reg(dest);
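           // With compressed oops, encode the new value into rscratch2 so
           // the caller's register is preserved; the old value placed in
           // dst is decoded again after the exchange.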
3143       if (is_oop && UseCompressedOops) {
3144         __ encode_heap_oop(rscratch2, obj);
3145         obj = rscratch2;
3146       }
3147       assert_different_registers(obj, addr.base(), tmp, rscratch1, dst);
3148       __ lea(tmp, addr);
3149       (_masm->*xchg)(dst, obj, tmp);
3150       if (is_oop && UseCompressedOops) {
3151         __ decode_heap_oop(dst);
3152       }
3153     }
3154     break;
3155   default:
3156     ShouldNotReachHere();
3157   }
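       // Trailing full barrier so the atomic update is not reordered with
       // subsequent memory accesses.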
3158   __ membar(__ AnyAny);
3159 }
3160 
3161 #undef __