/*
 * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "nativeInst_x86.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_x86.inline.hpp"


// These masks are used to provide 128-bit aligned bitmasks to the XMM
// instructions, to allow sign-masking or sign-bit flipping.  They allow
// fast versions of NegF/NegD and AbsF/AbsD.

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // for the 128-bit operands of SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}
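
// Illustrative example (hypothetical address): if 'adr' were 0x...1018,
// masking with ~0xF rounds it down to the 16-byte boundary 0x...1010.
// Callers always pass a slot at least 16 bytes into fp_signmask_pool, so
// the rounded-down address still lies within the pool.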

// Buffer for 128-bit masks used by SSE instructions.
static jlong fp_signmask_pool[(4+1)*2]; // 4*128 bits (data) + 128 bits (alignment)

// Static initialization during VM startup.
static jlong *float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2],         CONST64(0x7FFFFFFF7FFFFFFF),         CONST64(0x7FFFFFFF7FFFFFFF));
static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2],         CONST64(0x7FFFFFFFFFFFFFFF),         CONST64(0x7FFFFFFFFFFFFFFF));
static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
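
// These constants are used as follows: AbsF/AbsD and-mask the value with a
// signmask constant (clearing the sign bit), while NegF/NegD xor it with a
// signflip constant (toggling the sign bit), e.g. via andps/andpd and
// xorps/xorpd respectively.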


NEEDS_CLEANUP // remove these definitions?
const Register SYNC_header = rax;   // synchronization header
const Register SHIFT_count = rcx;   // where the count for shift operations must be

#define __ _masm->


static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2);
}



static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2,
                                       Register &tmp3) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp2 = extra;
  } else if (tmp3 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp3 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2, tmp3);
}



bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  if (opr->is_constant()) {
    LIR_Const* constant = opr->as_constant_ptr();
    switch (constant->type()) {
      case T_INT: {
        return true;
      }

      default:
        return false;
    }
  }
  return false;
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}

//--------------fpu register translations-----------------------


address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr == nullptr) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}


address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr == nullptr) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

void LIR_Assembler::breakpoint() {
  __ int3();
}

void LIR_Assembler::push(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ push_reg(opr->as_register());
  } else if (opr->is_double_cpu()) {
    __ push_reg(opr->as_register_lo());
  } else if (opr->is_stack()) {
    __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
  } else if (opr->is_constant()) {
    LIR_Const* const_opr = opr->as_constant_ptr();
    if (const_opr->type() == T_OBJECT) {
      __ push_oop(const_opr->as_jobject(), rscratch1);
    } else if (const_opr->type() == T_INT) {
      __ push_jint(const_opr->as_jint());
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::pop(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ pop_reg(opr->as_register());
  } else {
    ShouldNotReachHere();
  }
}

bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
  return addr->base()->is_illegal() && addr->index()->is_illegal();
}

//-------------------------------------------

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}

Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  if (addr->base()->is_illegal()) {
    assert(addr->index()->is_illegal(), "must be illegal too");
    AddressLiteral laddr((address)addr->disp(), relocInfo::none);
    if (! __ reachable(laddr)) {
      __ movptr(tmp, laddr.addr());
      Address res(tmp, 0);
      return res;
    } else {
      return __ as_Address(laddr);
    }
  }

  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal()) {
    return Address(base, addr->disp());
  } else if (addr->index()->is_cpu_register()) {
    Register index = addr->index()->as_pointer_register();
    return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());
  } else if (addr->index()->is_constant()) {
    intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
    assert(Assembler::is_simm32(addr_offset), "must be");

    return Address(base, addr_offset);
  } else {
    Unimplemented();
    return Address();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}


void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // We jump here if an OSR happens with the interpreter state set up to
  // continue at the beginning of the loop that triggered the OSR - in
  // particular, we have the following registers set up:
  //
  // rcx: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame, so the first slot in
  // the local array is the last local from the interpreter and the last
  // slot is local[0] (the receiver) from the interpreter.
  //
  // Similarly with locks: the first lock slot in the osr buffer is the nth
  // lock from the interpreter frame, and the nth lock slot in the osr buffer
  // is the 0th lock in the interpreter frame (the method lock, if a
  // synchronized method).

  // Initialize monitors in the compiled activation.
  //   rcx: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
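    //
    // Worked example (illustrative, 64-bit): with max_locals == 2 and
    // number_of_locks == 1, monitor_offset is 2*8 + 0 = 16, so the lock
    // word for monitor 0 is read from OSR_buf+16 and its object oop from
    // OSR_buf+24.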
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), NULL_WORD);
        __ jcc(Assembler::notZero, L);
        __ stop("locked object is null");
        __ bind(L);
      }
#endif
      __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
      __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
      __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ movptr(frame_map()->address_for_monitor_object(i), rbx);
    }
  }
}


// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
  return __ ic_check(CodeEntryAlignment);
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  Label L_skip_barrier;
  Register klass = rscratch1;

  __ mov_metadata(klass, method->holder()->constant_encoding());
  __ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);

  __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

  __ bind(L_skip_barrier);
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = nullptr;
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
  __ movoop(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata* o = nullptr;
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
  __ mov_metadata(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}

// This specifies the rsp decrement needed to build the frame.
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!

  // The frame_map records the size in slots (32-bit words).

  // Subtract two words to account for the return address and the link.
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}
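
// Illustrative example (hypothetical numbers): with a framesize of 16 slots,
// two words (4 slots on 64-bit) are subtracted for the return address and the
// saved rbp, giving (16 - 4) * 4 = 48 bytes of rsp decrement.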


int LIR_Assembler::emit_exception_handler() {
  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in rax and rdx
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(rax);

  // search an exception handler (rax: exception oop, rdx: throwing pc)
  __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_handle_exception_from_callee_id)));
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception-related thread state
  __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
  __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), NULL_WORD);
  __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), NULL_WORD);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(rax);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rbx, rax);  // Preserve the exception (rbx is always callee-saved)
  }

  // Perform needed unlocking
  MonitorExitStub* stub = nullptr;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::rax_opr);
    stub = new MonitorExitStub(FrameMap::rax_opr, 0);
    __ unlock_object(rdi, rsi, rax, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ mov(rdi, r15_thread);
    __ mov_metadata(rsi, method()->constant_encoding());
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rax, rbx);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes());
  __ jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != nullptr) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // generate code for the deopt handler
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

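  // Note the layout here: the handler's entry point (entry_offset, returned
  // below) is the trailing jmp, which branches back to this call. The return
  // address the call pushes is therefore the entry itself, which the unpack
  // blob can use to identify this deopt site.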
  Label start;
  __ bind(start);

  __ call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  int entry_offset = __ offset();

  __ jmp(start);

  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
         "out of bounds read in post-call NOP check");
  __ end_a_stub();

  return entry_offset;
}

void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
  if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
    assert(result->fpu() == 0, "result must already be on TOS");
  }

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // Note: we do not need to round double result; float result has the right precision
  // the poll sets the condition code, but no data registers

  code_stub->set_safepoint_offset(__ offset());
  __ relocate(relocInfo::poll_return_type);
  __ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
  __ ret(0);
}


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  guarantee(info != nullptr, "Shouldn't be null");
  int offset = __ offset();
  const Register poll_addr = rscratch1;
  __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
  add_debug_info_for_branch(info);
  __ relocate(relocInfo::poll_type);
  address pre_pc = __ pc();
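  // The poll reads from the per-thread polling page; when the page is armed
  // the load faults, and the signal handler uses the debug info recorded
  // above to service the safepoint/handshake.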
  __ testl(rax, Address(poll_addr, 0));
  address post_pc = __ pc();
  guarantee(pointer_delta(post_pc, pre_pc, 1) == 3, "must be exact length");
  return offset;
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) __ mov(to_reg, from_reg);
}

void LIR_Assembler::swap_reg(Register a, Register b) {
  __ xchgptr(a, b);
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movl(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movptr(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
      break;
    }

    case T_OBJECT: {
      if (patch_code != lir_patch_none) {
        jobject2reg_with_patching(dest->as_register(), info);
      } else {
        __ movoop(dest->as_register(), c->as_jobject());
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        if (UseAVX <= 2 && c->is_zero_float()) {
          __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
        } else {
          __ movflt(dest->as_xmm_float_reg(),
                    InternalAddress(float_constant(c->as_jfloat())));
        }
      } else {
        ShouldNotReachHere();
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        if (UseAVX <= 2 && c->is_zero_double()) {
          __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
        } else {
          __ movdbl(dest->as_xmm_double_reg(),
                    InternalAddress(double_constant(c->as_jdouble())));
        }
      } else {
        ShouldNotReachHere();
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT:  // fall through
    case T_FLOAT:
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_OBJECT:
      __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject(), rscratch1);
      break;

    case T_LONG:  // fall through
    case T_DOUBLE:
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes),
                (intptr_t)c->as_jlong_bits(),
                rscratch1);
      break;

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();

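  // null_check_here tracks the offset of the instruction that may take the
  // implicit null check; it is re-captured immediately before each store that
  // can fault so the debug info added below points at the faulting instruction.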
  int null_check_here = code_offset();
  switch (type) {
    case T_INT:    // fall through
    case T_FLOAT:
      __ movl(as_Address(addr), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(as_Address(addr), c->as_jint_bits());
      break;

    case T_OBJECT:  // fall through
    case T_ARRAY:
      if (c->as_jobject() == nullptr) {
        if (UseCompressedOops && !wide) {
          __ movl(as_Address(addr), NULL_WORD);
        } else {
          __ xorptr(rscratch1, rscratch1);
          null_check_here = code_offset();
          __ movptr(as_Address(addr), rscratch1);
        }
      } else {
        if (is_literal_address(addr)) {
          ShouldNotReachHere();
          __ movoop(as_Address(addr, noreg), c->as_jobject(), rscratch1);
        } else {
          __ movoop(rscratch1, c->as_jobject());
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(rscratch1);
            null_check_here = code_offset();
            __ movl(as_Address_lo(addr), rscratch1);
          } else {
            null_check_here = code_offset();
            __ movptr(as_Address_lo(addr), rscratch1);
          }
        }
      }
      break;

    case T_LONG:    // fall through
    case T_DOUBLE:
      if (is_literal_address(addr)) {
        ShouldNotReachHere();
        __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
      } else {
        __ movptr(r10, (intptr_t)c->as_jlong_bits());
        null_check_here = code_offset();
        __ movptr(as_Address_lo(addr), r10);
      }
      break;

    case T_BOOLEAN: // fall through
    case T_BYTE:
      __ movb(as_Address(addr), c->as_jint() & 0xFF);
      break;

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(addr), c->as_jint() & 0xFFFF);
      break;

    default:
      ShouldNotReachHere();
  }

  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}


void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
    if (is_reference_type(src->type())) {
      // Surprisingly, we can see a move of a long to T_OBJECT
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);

    // move between xmm-registers
  } else if (dest->is_single_xmm()) {
    assert(src->is_single_xmm(), "must match");
    __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
  } else if (dest->is_double_xmm()) {
    assert(src->is_double_xmm(), "must match");
    __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  if (src->is_single_cpu()) {
    Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
    if (is_reference_type(type)) {
      __ verify_oop(src->as_register());
      __ movptr(dst, src->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ movptr(dst, src->as_register());
    } else {
      __ movl(dst, src->as_register());
    }

  } else if (src->is_double_cpu()) {
    Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
    Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr(dstLO, src->as_register_lo());

  } else if (src->is_single_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ movflt(dst_addr, src->as_xmm_float_reg());

  } else if (src->is_double_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    __ movdbl(dst_addr, src->as_xmm_double_reg());

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = nullptr;
  Register compressed_src = rscratch1;

  if (is_reference_type(type)) {
    __ verify_oop(src->as_register());
    if (UseCompressedOops && !wide) {
      __ movptr(compressed_src, src->as_register());
      __ encode_heap_oop(compressed_src);
      if (patch_code != lir_patch_none) {
        info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
      }
    }
  }

  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    Address toa = as_Address(to_addr);
    assert(toa.disp() != 0, "must have");
  }

  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      assert(src->is_single_xmm(), "not a float");
      __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
      break;
    }

    case T_DOUBLE: {
      assert(src->is_double_xmm(), "not a double");
      __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ movl(as_Address(to_addr), compressed_src);
      } else {
        __ movptr(as_Address(to_addr), src->as_register());
      }
      break;
    case T_ADDRESS:
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_INT:
      __ movl(as_Address(to_addr), src->as_register());
      break;

    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
      __ movptr(as_Address_lo(to_addr), from_lo);
      break;
    }

    case T_BYTE:    // fall through
    case T_BOOLEAN: {
      Register src_reg = src->as_register();
      Address dst_addr = as_Address(to_addr);
      assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
      __ movb(dst_addr, src_reg);
      break;
    }

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(to_addr), src->as_register());
      break;

    default:
      ShouldNotReachHere();
  }
  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_here, info);
  }

  if (patch_code != lir_patch_none) {
    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
    if (is_reference_type(type)) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    } else {
      __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    }

  } else if (dest->is_double_cpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr(dest->as_register_lo(), src_addr_LO);

  } else if (dest->is_single_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ movflt(dest->as_xmm_float_reg(), src_addr);

  } else if (dest->is_double_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ movdbl(dest->as_xmm_double_reg(), src_addr);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    if (is_reference_type(type)) {
      __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
    } else {
      // no pushl on 64-bit
      __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
    }

  } else if (src->is_double_stack()) {
    __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
    __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE:    // fall through
    case T_CHAR:    // fall through
    case T_SHORT:
      if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
        // On pre-P6 processors we may get partial register stalls, so
        // blow away the value of to_rinfo before loading a partial word
        // into it.  Do it here so that it precedes the potential patch
        // point below.
        __ xorptr(dest->as_register(), dest->as_register());
      }
      break;
    default:
      break;
  }

  PatchingStub* patch = nullptr;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(from_addr.disp() != 0, "must have");
  }
  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        __ movflt(dest->as_xmm_float_reg(), from_addr);
      } else {
        ShouldNotReachHere();
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        __ movdbl(dest->as_xmm_double_reg(), from_addr);
      } else {
        ShouldNotReachHere();
      }
      break;
    }

    case T_OBJECT:  // fall through
    case T_ARRAY:   // fall through
      if (UseCompressedOops && !wide) {
        __ movl(dest->as_register(), from_addr);
      } else {
        __ movptr(dest->as_register(), from_addr);
      }
      break;

    case T_ADDRESS:
      __ movptr(dest->as_register(), from_addr);
      break;
    case T_INT:
      __ movl(dest->as_register(), from_addr);
      break;

    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
      __ movptr(to_lo, as_Address_lo(addr));
      break;
    }

    case T_BOOLEAN: // fall through
    case T_BYTE: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movsbl(dest_reg, from_addr);
      } else {
        __ movb(dest_reg, from_addr);
        __ shll(dest_reg, 24);
        __ sarl(dest_reg, 24);
      }
      break;
    }

    case T_CHAR: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movzwl(dest_reg, from_addr);
      } else {
        __ movw(dest_reg, from_addr);
      }
      break;
    }

    case T_SHORT: {
      Register dest_reg = dest->as_register();
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movswl(dest_reg, from_addr);
      } else {
        __ movw(dest_reg, from_addr);
        __ shll(dest_reg, 16);
        __ sarl(dest_reg, 16);
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }

  if (patch != nullptr) {
    patching_epilog(patch, patch_code, addr->base()->as_register(), info);
  }

  if (is_reference_type(type)) {
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }

    __ verify_oop(dest->as_register());
  }
}


NEEDS_CLEANUP; // This could be static?
Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  switch (elem_size) {
    case 1: return Address::times_1;
    case 2: return Address::times_2;
    case 4: return Address::times_4;
    case 8: return Address::times_8;
  }
  ShouldNotReachHere();
  return Address::no_scale;
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      arithmetic_idiv(op->code(),
                      op->in_opr1(),
                      op->in_opr2(),
                      op->in_opr3(),
                      op->result_opr(),
                      op->info());
      break;
    case lir_fmad:
      __ fmad(op->result_opr()->as_xmm_double_reg(),
              op->in_opr1()->as_xmm_double_reg(),
              op->in_opr2()->as_xmm_double_reg(),
              op->in_opr3()->as_xmm_double_reg());
      break;
    case lir_fmaf:
      __ fmaf(op->result_opr()->as_xmm_float_reg(),
              op->in_opr1()->as_xmm_float_reg(),
              op->in_opr2()->as_xmm_float_reg(),
              op->in_opr3()->as_xmm_float_reg());
      break;
    default:      ShouldNotReachHere(); break;
  }
}

void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
  if (op->block() != nullptr)  _branch_target_blocks.append(op->block());
  if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != nullptr) add_debug_info_for_branch(op->info());
    __ jmp (*(op->label()));
  } else {
    Assembler::Condition acond = Assembler::zero;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != nullptr, "must have unordered successor");
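      // Unordered comparisons (a NaN operand) set the parity flag on x86,
      // so route them to the unordered successor first.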
      __ jcc(Assembler::parity, *(op->ublock()->label()));
      switch (op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;      break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;   break;
        case lir_cond_less:         acond = Assembler::below;      break;
        case lir_cond_lessEqual:    acond = Assembler::belowEqual; break;
        case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break;
        case lir_cond_greater:      acond = Assembler::above;      break;
        default:                    ShouldNotReachHere();
      }
    } else {
      switch (op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;        break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;     break;
        case lir_cond_less:         acond = Assembler::less;         break;
        case lir_cond_lessEqual:    acond = Assembler::lessEqual;    break;
        case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
        case lir_cond_greater:      acond = Assembler::greater;      break;
        case lir_cond_belowEqual:   acond = Assembler::belowEqual;   break;
        case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   break;
        default:                    ShouldNotReachHere();
      }
    }
    __ jcc(acond, *(op->label()));
  }
}

void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
      __ movl2ptr(dest->as_register_lo(), src->as_register());
      break;

    case Bytecodes::_l2i:
      __ movl(dest->as_register(), src->as_register_lo());
      break;

    case Bytecodes::_i2b:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_byte(dest->as_register());
      break;

    case Bytecodes::_i2c:
      move_regs(src->as_register(), dest->as_register());
      __ andl(dest->as_register(), 0xFFFF);
      break;

    case Bytecodes::_i2s:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_short(dest->as_register());
      break;

    case Bytecodes::_f2d:
      __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2f:
      __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
      break;

    case Bytecodes::_i2f:
      __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
      break;

    case Bytecodes::_i2d:
      __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
      break;

    case Bytecodes::_l2f:
      __ cvtsi2ssq(dest->as_xmm_float_reg(), src->as_register_lo());
      break;

    case Bytecodes::_l2d:
      __ cvtsi2sdq(dest->as_xmm_double_reg(), src->as_register_lo());
      break;

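    // The convert_* macro helpers below implement Java's narrowing semantics:
    // a NaN input produces 0 and out-of-range values saturate to the target
    // type's min/max, via a fixup path after the raw cvtt* instruction.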
    case Bytecodes::_f2i:
      __ convert_f2i(dest->as_register(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2i:
      __ convert_d2i(dest->as_register(), src->as_xmm_double_reg());
      break;

    case Bytecodes::_f2l:
      __ convert_f2l(dest->as_register_lo(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2l:
      __ convert_d2l(dest->as_register_lo(), src->as_xmm_double_reg());
      break;

    default: ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    add_debug_info_for_null_check_here(op->stub()->info());
    // init_state needs acquire, but x86 is TSO, and so we are already good.
    __ cmpb(Address(op->klass()->as_register(),
                    InstanceKlass::init_state_offset()),
                    InstanceKlass::fully_initialized);
    __ jcc(Assembler::notEqual, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();
  __ movslq(len, len);

  if (UseSlowPath ||
      (!UseFastNewObjectArray && is_reference_type(op->type())) ||
      (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
    __ jmp(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::base_offset_in_bytes(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry(),
                      op->zero_array());
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ jccb(Assembler::notEqual, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
    __ cmpptr(recv_addr, NULL_WORD);
    __ jccb(Assembler::notEqual, next_test);
    __ movptr(recv_addr, recv);
    __ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }
}

void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;
  Register tmp_load_klass = rscratch1;

  // check if it needs to be profiled
  ciMethodData* md = nullptr;
  ciProfileData* data = nullptr;

  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != nullptr, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != nullptr, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != nullptr,             "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  Label* success_target = success;
  Label* failure_target = failure;

  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedClassPointers) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  __ testptr(obj, obj);
  if (op->should_profile()) {
    Label not_null;
    Register mdo  = klass_RInfo;
    __ mov_metadata(mdo, md->constant_encoding());
    __ jccb(Assembler::notEqual, not_null);
    // Object is null; update MDO and exit
    Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
    int header_bits = BitData::null_seen_byte_constant();
    __ orb(data_addr, header_bits);
    __ jmp(*obj_is_null);
    __ bind(not_null);

    Label update_done;
    Register recv = k_RInfo;
    __ load_klass(recv, obj, tmp_load_klass);
    type_profile_helper(mdo, md, data, recv, &update_done);

    Address nonprofiled_receiver_count_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
    __ addptr(nonprofiled_receiver_count_addr, DataLayout::counter_increment);

    __ bind(update_done);
  } else {
    __ jcc(Assembler::equal, *obj_is_null);
  }

  if (!k->is_loaded()) {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
    __ mov_metadata(k_RInfo, k->constant_encoding());
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // get object class
    // not a safepoint as obj null check happens earlier
    if (UseCompressedClassPointers) {
      __ load_klass(Rtmp1, obj, tmp_load_klass);
      __ cmpptr(k_RInfo, Rtmp1);
    } else {
      __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
    }
    __ jcc(Assembler::notEqual, *failure_target);
    // successful cast, fall through to profile or jump
  } else {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(klass_RInfo, obj, tmp_load_klass);
    if (k->is_loaded()) {
      // See if we get an immediate positive hit
      __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
      if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
        __ jcc(Assembler::notEqual, *failure_target);
        // successful cast, fall through to profile or jump
      } else {
        // See if we get an immediate positive hit
        __ jcc(Assembler::equal, *success_target);
        // check for self
        __ cmpptr(klass_RInfo, k_RInfo);
        __ jcc(Assembler::equal, *success_target);

        __ push_ppx(klass_RInfo);
        __ push_ppx(k_RInfo);
        __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
        __ pop_ppx(klass_RInfo);
        __ pop_ppx(klass_RInfo);
        // result is a boolean
        __ testl(klass_RInfo, klass_RInfo);
        __ jcc(Assembler::equal, *failure_target);
        // successful cast, fall through to profile or jump
      }
    } else {
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      __ push_ppx(klass_RInfo);
      __ push_ppx(k_RInfo);
      __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
      __ pop_ppx(klass_RInfo);
      __ pop_ppx(k_RInfo);
      // result is a boolean
      __ testl(k_RInfo, k_RInfo);
      __ jcc(Assembler::equal, *failure_target);
      // successful cast, fall through to profile or jump
    }
  }
  __ jmp(*success);
}
1419 
1420 
1421 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
1422   Register tmp_load_klass = rscratch1;
1423   LIR_Code code = op->code();
1424   if (code == lir_store_check) {
1425     Register value = op->object()->as_register();
1426     Register array = op->array()->as_register();
1427     Register k_RInfo = op->tmp1()->as_register();
1428     Register klass_RInfo = op->tmp2()->as_register();
1429     Register Rtmp1 = op->tmp3()->as_register();
1430 
1431     CodeStub* stub = op->stub();
1432 
1433     // check if it needs to be profiled
1434     ciMethodData* md = nullptr;
1435     ciProfileData* data = nullptr;
1436 
1437     if (op->should_profile()) {
1438       ciMethod* method = op->profiled_method();
1439       assert(method != nullptr, "Should have method");
1440       int bci = op->profiled_bci();
1441       md = method->method_data_or_null();
1442       assert(md != nullptr, "Sanity");
1443       data = md->bci_to_data(bci);
1444       assert(data != nullptr,                "need data for type check");
1445       assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1446     }
1447     Label done;
1448     Label* success_target = &done;
1449     Label* failure_target = stub->entry();
1450 
1451     __ testptr(value, value);
1452     if (op->should_profile()) {
1453       Label not_null;
1454       Register mdo  = klass_RInfo;
1455       __ mov_metadata(mdo, md->constant_encoding());
1456       __ jccb(Assembler::notEqual, not_null);
1457       // Object is null; update MDO and exit
1458       Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
1459       int header_bits = BitData::null_seen_byte_constant();
1460       __ orb(data_addr, header_bits);
1461       __ jmp(done);
1462       __ bind(not_null);
1463 
1464       Label update_done;
1465       Register recv = k_RInfo;
1466       __ load_klass(recv, value, tmp_load_klass);
1467       type_profile_helper(mdo, md, data, recv, &update_done);
1468 
1469       Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1470       __ addptr(counter_addr, DataLayout::counter_increment);
1471       __ bind(update_done);
1472     } else {
1473       __ jcc(Assembler::equal, done);
1474     }
1475 
1476     add_debug_info_for_null_check_here(op->info_for_exception());
1477     __ load_klass(k_RInfo, array, tmp_load_klass);
1478     __ load_klass(klass_RInfo, value, tmp_load_klass);
1479 
1480     // get instance klass (it's already uncompressed)
1481     __ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
1482     // perform the fast part of the checking logic
1483     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
1484     // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1485     __ push_ppx(klass_RInfo);
1486     __ push_ppx(k_RInfo);
1487     __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1488     __ pop_ppx(klass_RInfo);
1489     __ pop_ppx(k_RInfo);
1490     // result is a boolean
1491     __ testl(k_RInfo, k_RInfo);
1492     __ jcc(Assembler::equal, *failure_target);
1493     // fall through to the success case
1494 
1495     __ bind(done);
  } else if (code == lir_checkcast) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success;
    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
    __ bind(success);
    if (dst != obj) {
      __ mov(dst, obj);
    }
  } else if (code == lir_instanceof) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    emit_typecheck_helper(op, &success, &failure, &failure);
    __ bind(failure);
    __ xorptr(dst, dst);
    __ jmpb(done);
    __ bind(success);
    __ movptr(dst, 1);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
1521 
1522 }
1523 
1524 
1525 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1526   if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
1527     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1528     Register newval = op->new_value()->as_register();
1529     Register cmpval = op->cmp_value()->as_register();
1530     assert(cmpval == rax, "wrong register");
1531     assert(newval != noreg, "new val must be register");
1532     assert(cmpval != newval, "cmp and new values must be in different registers");
1533     assert(cmpval != addr, "cmp and addr must be in different registers");
1534     assert(newval != addr, "new value and addr must be in different registers");
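    // lock cmpxchg compares rax (cmpval) with the memory operand: on a
    // match it stores the new value and sets ZF, otherwise it loads the
    // current memory contents into rax and clears ZF.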
1535 
1536     if (op->code() == lir_cas_obj) {
1537       if (UseCompressedOops) {
1538         __ encode_heap_oop(cmpval);
1539         __ mov(rscratch1, newval);
1540         __ encode_heap_oop(rscratch1);
1541         __ lock();
1542         // cmpval (rax) is implicitly used by this instruction
1543         __ cmpxchgl(rscratch1, Address(addr, 0));
1544       } else {
1545         __ lock();
1546         __ cmpxchgptr(newval, Address(addr, 0));
1547       }
1548     } else {
1549       assert(op->code() == lir_cas_int, "lir_cas_int expected");
1550       __ lock();
1551       __ cmpxchgl(newval, Address(addr, 0));
1552     }
1553   } else if (op->code() == lir_cas_long) {
1554     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1555     Register newval = op->new_value()->as_register_lo();
1556     Register cmpval = op->cmp_value()->as_register_lo();
1557     assert(cmpval == rax, "wrong register");
1558     assert(newval != noreg, "new val must be register");
1559     assert(cmpval != newval, "cmp and new values must be in different registers");
1560     assert(cmpval != addr, "cmp and addr must be in different registers");
1561     assert(newval != addr, "new value and addr must be in different registers");
1562     __ lock();
1563     __ cmpxchgq(newval, Address(addr, 0));
1564   } else {
1565     Unimplemented();
1566   }
1567 }
1568 
1569 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
1570                           LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
1571   assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on x86");
1572 
1573   Assembler::Condition acond, ncond;
1574   switch (condition) {
1575     case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
1576     case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
1577     case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
1578     case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
1579     case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
1580     case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
1581     case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
1582     case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
1583     default:                    acond = Assembler::equal;        ncond = Assembler::notEqual;
1584                                 ShouldNotReachHere();
1585   }
1586 
1587   if (opr1->is_cpu_register()) {
1588     reg2reg(opr1, result);
1589   } else if (opr1->is_stack()) {
1590     stack2reg(opr1, result, result->type());
1591   } else if (opr1->is_constant()) {
1592     const2reg(opr1, result, lir_patch_none, nullptr);
1593   } else {
1594     ShouldNotReachHere();
1595   }
1596 
1597   if (VM_Version::supports_cmov() && !opr2->is_constant()) {
1598     // optimized version that does not require a branch
1599     if (opr2->is_single_cpu()) {
1600       assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");
1601       __ cmov(ncond, result->as_register(), opr2->as_register());
1602     } else if (opr2->is_double_cpu()) {
1603       assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
1604       assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
1605       __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo());
1606     } else if (opr2->is_single_stack()) {
1607       __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()));
1608     } else if (opr2->is_double_stack()) {
1609       __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes));
1610     } else {
1611       ShouldNotReachHere();
1612     }
1613 
1614   } else {
1615     Label skip;
1616     __ jccb(acond, skip);
1617     if (opr2->is_cpu_register()) {
1618       reg2reg(opr2, result);
1619     } else if (opr2->is_stack()) {
1620       stack2reg(opr2, result, result->type());
1621     } else if (opr2->is_constant()) {
1622       const2reg(opr2, result, lir_patch_none, nullptr);
1623     } else {
1624       ShouldNotReachHere();
1625     }
1626     __ bind(skip);
1627   }
1628 }
1629 
1630 
1631 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info) {
1632   assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
1633 
1634   if (left->is_single_cpu()) {
1635     assert(left == dest, "left and dest must be equal");
1636     Register lreg = left->as_register();
1637 
1638     if (right->is_single_cpu()) {
1639       // cpu register - cpu register
1640       Register rreg = right->as_register();
1641       switch (code) {
1642         case lir_add: __ addl (lreg, rreg); break;
1643         case lir_sub: __ subl (lreg, rreg); break;
1644         case lir_mul: __ imull(lreg, rreg); break;
1645         default:      ShouldNotReachHere();
1646       }
1647 
1648     } else if (right->is_stack()) {
1649       // cpu register - stack
1650       Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
1651       switch (code) {
1652         case lir_add: __ addl(lreg, raddr); break;
1653         case lir_sub: __ subl(lreg, raddr); break;
1654         default:      ShouldNotReachHere();
1655       }
1656 
1657     } else if (right->is_constant()) {
1658       // cpu register - constant
1659       jint c = right->as_constant_ptr()->as_jint();
1660       switch (code) {
1661         case lir_add: {
1662           __ incrementl(lreg, c);
1663           break;
1664         }
1665         case lir_sub: {
1666           __ decrementl(lreg, c);
1667           break;
1668         }
1669         default: ShouldNotReachHere();
1670       }
1671 
1672     } else {
1673       ShouldNotReachHere();
1674     }
1675 
1676   } else if (left->is_double_cpu()) {
1677     assert(left == dest, "left and dest must be equal");
    Register lreg_lo = left->as_register_lo();
1680 
1681     if (right->is_double_cpu()) {
1682       // cpu register - cpu register
      Register rreg_lo = right->as_register_lo();
1685       assert_different_registers(lreg_lo, rreg_lo);
1686       switch (code) {
1687         case lir_add:
1688           __ addptr(lreg_lo, rreg_lo);
1689           break;
1690         case lir_sub:
1691           __ subptr(lreg_lo, rreg_lo);
1692           break;
1693         case lir_mul:
1694           __ imulq(lreg_lo, rreg_lo);
1695           break;
1696         default:
1697           ShouldNotReachHere();
1698       }
1699 
1700     } else if (right->is_constant()) {
1701       // cpu register - constant
1702       jlong c = right->as_constant_ptr()->as_jlong_bits();
1703       __ movptr(r10, (intptr_t) c);
1704       switch (code) {
1705         case lir_add:
1706           __ addptr(lreg_lo, r10);
1707           break;
1708         case lir_sub:
1709           __ subptr(lreg_lo, r10);
1710           break;
1711         default:
1712           ShouldNotReachHere();
1713       }
1714 
1715     } else {
1716       ShouldNotReachHere();
1717     }
1718 
1719   } else if (left->is_single_xmm()) {
1720     assert(left == dest, "left and dest must be equal");
1721     XMMRegister lreg = left->as_xmm_float_reg();
1722 
1723     if (right->is_single_xmm()) {
1724       XMMRegister rreg = right->as_xmm_float_reg();
1725       switch (code) {
1726         case lir_add: __ addss(lreg, rreg);  break;
1727         case lir_sub: __ subss(lreg, rreg);  break;
1728         case lir_mul: __ mulss(lreg, rreg);  break;
1729         case lir_div: __ divss(lreg, rreg);  break;
1730         default: ShouldNotReachHere();
1731       }
1732     } else {
1733       Address raddr;
1734       if (right->is_single_stack()) {
1735         raddr = frame_map()->address_for_slot(right->single_stack_ix());
1736       } else if (right->is_constant()) {
1737         // hack for now
1738         raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat())));
1739       } else {
1740         ShouldNotReachHere();
1741       }
1742       switch (code) {
1743         case lir_add: __ addss(lreg, raddr);  break;
1744         case lir_sub: __ subss(lreg, raddr);  break;
1745         case lir_mul: __ mulss(lreg, raddr);  break;
1746         case lir_div: __ divss(lreg, raddr);  break;
1747         default: ShouldNotReachHere();
1748       }
1749     }
1750 
1751   } else if (left->is_double_xmm()) {
1752     assert(left == dest, "left and dest must be equal");
1753 
1754     XMMRegister lreg = left->as_xmm_double_reg();
1755     if (right->is_double_xmm()) {
1756       XMMRegister rreg = right->as_xmm_double_reg();
1757       switch (code) {
1758         case lir_add: __ addsd(lreg, rreg);  break;
1759         case lir_sub: __ subsd(lreg, rreg);  break;
1760         case lir_mul: __ mulsd(lreg, rreg);  break;
1761         case lir_div: __ divsd(lreg, rreg);  break;
1762         default: ShouldNotReachHere();
1763       }
1764     } else {
1765       Address raddr;
1766       if (right->is_double_stack()) {
1767         raddr = frame_map()->address_for_slot(right->double_stack_ix());
1768       } else if (right->is_constant()) {
1769         // hack for now
1770         raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
1771       } else {
1772         ShouldNotReachHere();
1773       }
1774       switch (code) {
1775         case lir_add: __ addsd(lreg, raddr);  break;
1776         case lir_sub: __ subsd(lreg, raddr);  break;
1777         case lir_mul: __ mulsd(lreg, raddr);  break;
1778         case lir_div: __ divsd(lreg, raddr);  break;
1779         default: ShouldNotReachHere();
1780       }
1781     }
1782 
1783   } else if (left->is_single_stack() || left->is_address()) {
1784     assert(left == dest, "left and dest must be equal");
1785 
1786     Address laddr;
1787     if (left->is_single_stack()) {
1788       laddr = frame_map()->address_for_slot(left->single_stack_ix());
1789     } else if (left->is_address()) {
1790       laddr = as_Address(left->as_address_ptr());
1791     } else {
1792       ShouldNotReachHere();
1793     }
1794 
1795     if (right->is_single_cpu()) {
1796       Register rreg = right->as_register();
1797       switch (code) {
1798         case lir_add: __ addl(laddr, rreg); break;
1799         case lir_sub: __ subl(laddr, rreg); break;
1800         default:      ShouldNotReachHere();
1801       }
1802     } else if (right->is_constant()) {
1803       jint c = right->as_constant_ptr()->as_jint();
1804       switch (code) {
1805         case lir_add: {
1806           __ incrementl(laddr, c);
1807           break;
1808         }
1809         case lir_sub: {
1810           __ decrementl(laddr, c);
1811           break;
1812         }
1813         default: ShouldNotReachHere();
1814       }
1815     } else {
1816       ShouldNotReachHere();
1817     }
1818 
1819   } else {
1820     ShouldNotReachHere();
1821   }
1822 }
1823 
1824 
1825 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
1826   if (value->is_double_xmm()) {
1827     switch(code) {
1828       case lir_abs :
1829         {
1830           if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
1831             __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
1832           }
1833           assert(!tmp->is_valid(), "do not need temporary");
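          // ANDing with double_signmask_pool (0x7FFFFFFFFFFFFFFF in each
          // lane) clears the IEEE-754 sign bit, which is exactly fabs().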
1834           __ andpd(dest->as_xmm_double_reg(),
1835                    ExternalAddress((address)double_signmask_pool),
1836                    rscratch1);
1837         }
1838         break;
1839 
1840       case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
1841       // all other intrinsics are not available in the SSE instruction set, so FPU is used
1842       default      : ShouldNotReachHere();
1843     }
1844 
1845   } else if (code == lir_f2hf) {
1846     __ flt_to_flt16(dest->as_register(), value->as_xmm_float_reg(), tmp->as_xmm_float_reg());
1847   } else if (code == lir_hf2f) {
1848     __ flt16_to_flt(dest->as_xmm_float_reg(), value->as_register());
1849   } else {
1850     Unimplemented();
1851   }
1852 }
1853 
1854 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1855   // assert(left->destroys_register(), "check");
1856   if (left->is_single_cpu()) {
1857     Register reg = left->as_register();
1858     if (right->is_constant()) {
1859       int val = right->as_constant_ptr()->as_jint();
1860       switch (code) {
1861         case lir_logic_and: __ andl (reg, val); break;
1862         case lir_logic_or:  __ orl  (reg, val); break;
1863         case lir_logic_xor: __ xorl (reg, val); break;
1864         default: ShouldNotReachHere();
1865       }
1866     } else if (right->is_stack()) {
1867       // added support for stack operands
1868       Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
1869       switch (code) {
1870         case lir_logic_and: __ andl (reg, raddr); break;
1871         case lir_logic_or:  __ orl  (reg, raddr); break;
1872         case lir_logic_xor: __ xorl (reg, raddr); break;
1873         default: ShouldNotReachHere();
1874       }
1875     } else {
1876       Register rright = right->as_register();
1877       switch (code) {
1878         case lir_logic_and: __ andptr (reg, rright); break;
1879         case lir_logic_or : __ orptr  (reg, rright); break;
1880         case lir_logic_xor: __ xorptr (reg, rright); break;
1881         default: ShouldNotReachHere();
1882       }
1883     }
1884     move_regs(reg, dst->as_register());
1885   } else {
    Register l_lo = left->as_register_lo();
1888     if (right->is_constant()) {
1889       __ mov64(rscratch1, right->as_constant_ptr()->as_jlong());
1890       switch (code) {
1891         case lir_logic_and:
1892           __ andq(l_lo, rscratch1);
1893           break;
1894         case lir_logic_or:
1895           __ orq(l_lo, rscratch1);
1896           break;
1897         case lir_logic_xor:
1898           __ xorq(l_lo, rscratch1);
1899           break;
1900         default: ShouldNotReachHere();
1901       }
1902     } else {
1903       Register r_lo;
1904       if (is_reference_type(right->type())) {
1905         r_lo = right->as_register();
1906       } else {
1907         r_lo = right->as_register_lo();
1908       }
1909       switch (code) {
1910         case lir_logic_and:
1911           __ andptr(l_lo, r_lo);
1912           break;
1913         case lir_logic_or:
1914           __ orptr(l_lo, r_lo);
1915           break;
1916         case lir_logic_xor:
1917           __ xorptr(l_lo, r_lo);
1918           break;
1919         default: ShouldNotReachHere();
1920       }
1921     }
1922 
    Register dst_lo = dst->as_register_lo();
    move_regs(l_lo, dst_lo);
1927   }
1928 }
1929 
1930 
// we assume that rax and rdx can be overwritten
1932 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
1933 
1934   assert(left->is_single_cpu(),   "left must be register");
1935   assert(right->is_single_cpu() || right->is_constant(),  "right must be register or constant");
1936   assert(result->is_single_cpu(), "result must be register");
1937 
1938   //  assert(left->destroys_register(), "check");
1939   //  assert(right->destroys_register(), "check");
1940 
1941   Register lreg = left->as_register();
1942   Register dreg = result->as_register();
1943 
1944   if (right->is_constant()) {
1945     jint divisor = right->as_constant_ptr()->as_jint();
1946     assert(divisor > 0 && is_power_of_2(divisor), "must be");
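    // For a power-of-2 divisor d = 2^k, signed division rounds toward
    // zero by adding d - 1 to negative dividends before the arithmetic
    // shift, e.g. -7/4: (-7 + 3) >> 2 == -1. After cdql, rdx is all sign
    // bits, so rdx & (d - 1) is that correction; for d == 2 it reduces
    // to subtracting rdx (i.e. adding 1 when negative).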
1947     if (code == lir_idiv) {
1948       assert(lreg == rax, "must be rax,");
1949       assert(temp->as_register() == rdx, "tmp register must be rdx");
      __ cdql(); // sign extend eax into edx:eax
1951       if (divisor == 2) {
1952         __ subl(lreg, rdx);
1953       } else {
1954         __ andl(rdx, divisor - 1);
1955         __ addl(lreg, rdx);
1956       }
1957       __ sarl(lreg, log2i_exact(divisor));
1958       move_regs(lreg, dreg);
1959     } else if (code == lir_irem) {
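      // Remainder for d = 2^k without dividing: keep the sign bit plus
      // the low k bits; a non-negative result is already the answer,
      // otherwise dec / or ~(d - 1) / inc sign-extends it, e.g. for
      // -7 % 4: 0x80000001 -> 0x80000000 -> 0xFFFFFFFC -> -3.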
1960       Label done;
1961       __ mov(dreg, lreg);
1962       __ andl(dreg, 0x80000000 | (divisor - 1));
1963       __ jcc(Assembler::positive, done);
1964       __ decrement(dreg);
1965       __ orl(dreg, ~(divisor - 1));
1966       __ increment(dreg);
1967       __ bind(done);
1968     } else {
1969       ShouldNotReachHere();
1970     }
1971   } else {
1972     Register rreg = right->as_register();
1973     assert(lreg == rax, "left register must be rax,");
1974     assert(rreg != rdx, "right register must not be rdx");
1975     assert(temp->as_register() == rdx, "tmp register must be rdx");
1976 
1977     move_regs(lreg, rax);
1978 
1979     int idivl_offset = __ corrected_idivl(rreg);
1980     if (ImplicitDiv0Checks) {
1981       add_debug_info_for_div0(idivl_offset, info);
1982     }
1983     if (code == lir_irem) {
1984       move_regs(rdx, dreg); // result is in rdx
1985     } else {
1986       move_regs(rax, dreg);
1987     }
1988   }
1989 }
1990 
1991 
1992 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1993   if (opr1->is_single_cpu()) {
1994     Register reg1 = opr1->as_register();
1995     if (opr2->is_single_cpu()) {
1996       // cpu register - cpu register
1997       if (is_reference_type(opr1->type())) {
1998         __ cmpoop(reg1, opr2->as_register());
1999       } else {
2000         assert(!is_reference_type(opr2->type()), "cmp int, oop?");
2001         __ cmpl(reg1, opr2->as_register());
2002       }
2003     } else if (opr2->is_stack()) {
2004       // cpu register - stack
2005       if (is_reference_type(opr1->type())) {
2006         __ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2007       } else {
2008         __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2009       }
2010     } else if (opr2->is_constant()) {
2011       // cpu register - constant
2012       LIR_Const* c = opr2->as_constant_ptr();
2013       if (c->type() == T_INT) {
2014         jint i = c->as_jint();
2015         if (i == 0) {
2016           __ testl(reg1, reg1);
2017         } else {
2018           __ cmpl(reg1, i);
2019         }
2020       } else if (c->type() == T_METADATA) {
2021         // All we need for now is a comparison with null for equality.
2022         assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
2023         Metadata* m = c->as_metadata();
2024         if (m == nullptr) {
2025           __ testptr(reg1, reg1);
2026         } else {
2027           ShouldNotReachHere();
2028         }
2029       } else if (is_reference_type(c->type())) {
        // In 64-bit mode an oop is a single register
2031         jobject o = c->as_jobject();
2032         if (o == nullptr) {
2033           __ testptr(reg1, reg1);
2034         } else {
2035           __ cmpoop(reg1, o, rscratch1);
2036         }
2037       } else {
2038         fatal("unexpected type: %s", basictype_to_str(c->type()));
2039       }
    } else if (opr2->is_address()) {
      // cpu register - address
2042       if (op->info() != nullptr) {
2043         add_debug_info_for_null_check_here(op->info());
2044       }
2045       __ cmpl(reg1, as_Address(opr2->as_address_ptr()));
2046     } else {
2047       ShouldNotReachHere();
2048     }
2049 
2050   } else if(opr1->is_double_cpu()) {
    Register xlo = opr1->as_register_lo();
2053     if (opr2->is_double_cpu()) {
2054       __ cmpptr(xlo, opr2->as_register_lo());
2055     } else if (opr2->is_constant()) {
2056       // cpu register - constant 0
2057       assert(opr2->as_jlong() == (jlong)0, "only handles zero");
2058       __ cmpptr(xlo, (int32_t)opr2->as_jlong());
2059     } else {
2060       ShouldNotReachHere();
2061     }
2062 
2063   } else if (opr1->is_single_xmm()) {
2064     XMMRegister reg1 = opr1->as_xmm_float_reg();
2065     if (opr2->is_single_xmm()) {
2066       // xmm register - xmm register
2067       __ ucomiss(reg1, opr2->as_xmm_float_reg());
2068     } else if (opr2->is_stack()) {
2069       // xmm register - stack
2070       __ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2071     } else if (opr2->is_constant()) {
2072       // xmm register - constant
2073       __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat())));
2074     } else if (opr2->is_address()) {
2075       // xmm register - address
2076       if (op->info() != nullptr) {
2077         add_debug_info_for_null_check_here(op->info());
2078       }
2079       __ ucomiss(reg1, as_Address(opr2->as_address_ptr()));
2080     } else {
2081       ShouldNotReachHere();
2082     }
2083 
2084   } else if (opr1->is_double_xmm()) {
2085     XMMRegister reg1 = opr1->as_xmm_double_reg();
2086     if (opr2->is_double_xmm()) {
2087       // xmm register - xmm register
2088       __ ucomisd(reg1, opr2->as_xmm_double_reg());
2089     } else if (opr2->is_stack()) {
2090       // xmm register - stack
2091       __ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix()));
2092     } else if (opr2->is_constant()) {
2093       // xmm register - constant
2094       __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
2095     } else if (opr2->is_address()) {
2096       // xmm register - address
2097       if (op->info() != nullptr) {
2098         add_debug_info_for_null_check_here(op->info());
2099       }
      __ ucomisd(reg1, as_Address(opr2->as_address_ptr()));
2101     } else {
2102       ShouldNotReachHere();
2103     }
2104 
2105   } else if (opr1->is_address() && opr2->is_constant()) {
2106     LIR_Const* c = opr2->as_constant_ptr();
2107     if (is_reference_type(c->type())) {
2108       assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
2109       __ movoop(rscratch1, c->as_jobject());
2110     }
2111     if (op->info() != nullptr) {
2112       add_debug_info_for_null_check_here(op->info());
2113     }
2114     // special case: address - constant
2115     LIR_Address* addr = opr1->as_address_ptr();
2116     if (c->type() == T_INT) {
2117       __ cmpl(as_Address(addr), c->as_jint());
2118     } else if (is_reference_type(c->type())) {
      // %%% Give noreg as the temp for as_Address so this explodes if addr
      // isn't reachable, until we figure out a better strategy
2121       __ cmpoop(rscratch1, as_Address(addr, noreg));
2122     } else {
2123       ShouldNotReachHere();
2124     }
2125 
2126   } else {
2127     ShouldNotReachHere();
2128   }
2129 }
2130 
2131 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
2132   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2133     if (left->is_single_xmm()) {
2134       assert(right->is_single_xmm(), "must match");
2135       __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i);
2136     } else if (left->is_double_xmm()) {
2137       assert(right->is_double_xmm(), "must match");
2138       __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i);
2139 
2140     } else {
2141       ShouldNotReachHere();
2142     }
2143   } else {
2144     assert(code == lir_cmp_l2i, "check");
2145     Label done;
2146     Register dest = dst->as_register();
2147     __ cmpptr(left->as_register_lo(), right->as_register_lo());
2148     __ movl(dest, -1);
2149     __ jccb(Assembler::less, done);
2150     __ setb(Assembler::notZero, dest);
2151     __ movzbl(dest, dest);
2152     __ bind(done);
2153   }
2154 }
2155 
2156 
2157 void LIR_Assembler::align_call(LIR_Code code) {
2158   // make sure that the displacement word of the call ends up word aligned
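  // so that it can later be patched atomically while other threads are
  // executing the call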
2159   int offset = __ offset();
2160   switch (code) {
2161   case lir_static_call:
2162   case lir_optvirtual_call:
2163   case lir_dynamic_call:
2164     offset += NativeCall::displacement_offset;
2165     break;
2166   case lir_icvirtual_call:
2167     offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size_rex;
2168     break;
2169   default: ShouldNotReachHere();
2170   }
2171   __ align(BytesPerWord, offset);
2172 }
2173 
2174 
2175 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2176   assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2177          "must be aligned");
2178   __ call(AddressLiteral(op->addr(), rtype));
2179   add_call_info(code_offset(), op->info());
2180   __ post_call_nop();
2181 }
2182 
2183 
2184 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2185   __ ic_call(op->addr());
2186   add_call_info(code_offset(), op->info());
2187   assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
2188          "must be aligned");
2189   __ post_call_nop();
2190 }
2191 
2192 
2193 void LIR_Assembler::emit_static_call_stub() {
2194   address call_pc = __ pc();
2195   address stub = __ start_a_stub(call_stub_size());
2196   if (stub == nullptr) {
2197     bailout("static call stub overflow");
2198     return;
2199   }
2200 
2201   int start = __ offset();
2202 
2203   // make sure that the displacement word of the call ends up word aligned
2204   __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size_rex + NativeCall::displacement_offset);
2205   __ relocate(static_stub_Relocation::spec(call_pc));
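  // The stub body is movq rbx, <Method*> followed by jmp <entry>; both
  // are placeholders here and get patched when the call is resolved.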
2206   __ mov_metadata(rbx, (Metadata*)nullptr);
2207   // must be set to -1 at code generation time
2208   assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned");
  // On 64-bit a far target would expand to a movq & jmp and die; this must remain a single jmp
2210   __ jump(RuntimeAddress(__ pc()));
2211 
2212   assert(__ offset() - start <= call_stub_size(), "stub too big");
2213   __ end_a_stub();
2214 }
2215 
2216 
2217 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2218   assert(exceptionOop->as_register() == rax, "must match");
2219   assert(exceptionPC->as_register() == rdx, "must match");
2220 
2221   // exception object is not added to oop map by LinearScan
2222   // (LinearScan assumes that no oops are in fixed registers)
2223   info->add_register_oop(exceptionOop);
2224   StubId unwind_id;
2225 
2226   // get current pc information
2227   // pc is only needed if the method has an exception handler, the unwind code does not need it.
2228   int pc_for_athrow_offset = __ offset();
2229   InternalAddress pc_for_athrow(__ pc());
2230   __ lea(exceptionPC->as_register(), pc_for_athrow);
2231   add_call_info(pc_for_athrow_offset, info); // for exception handler
2232 
2233   __ verify_not_null_oop(rax);
2234   // search an exception handler (rax: exception oop, rdx: throwing pc)
2235   if (compilation()->has_fpu_code()) {
2236     unwind_id = StubId::c1_handle_exception_id;
2237   } else {
2238     unwind_id = StubId::c1_handle_exception_nofpu_id;
2239   }
2240   __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
2241 
2242   // enough room for two byte trap
2243   __ nop();
2244 }
2245 
2246 
2247 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
2248   assert(exceptionOop->as_register() == rax, "must match");
2249 
2250   __ jmp(_unwind_handler_entry);
2251 }
2252 
2253 
2254 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2255 
2256   // optimized version for linear scan:
  // * count must already be in ECX (guaranteed by LinearScan)
2258   // * left and dest must be equal
2259   // * tmp must be unused
2260   assert(count->as_register() == SHIFT_count, "count must be in ECX");
2261   assert(left == dest, "left and dest must be equal");
2262   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2263 
2264   if (left->is_single_cpu()) {
2265     Register value = left->as_register();
2266     assert(value != SHIFT_count, "left cannot be ECX");
2267 
2268     switch (code) {
2269       case lir_shl:  __ shll(value); break;
2270       case lir_shr:  __ sarl(value); break;
2271       case lir_ushr: __ shrl(value); break;
2272       default: ShouldNotReachHere();
2273     }
2274   } else if (left->is_double_cpu()) {
2275     Register lo = left->as_register_lo();
2276     Register hi = left->as_register_hi();
2277     assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX");
2278     switch (code) {
2279       case lir_shl:  __ shlptr(lo);        break;
2280       case lir_shr:  __ sarptr(lo);        break;
2281       case lir_ushr: __ shrptr(lo);        break;
2282       default: ShouldNotReachHere();
2283     }
2284   } else {
2285     ShouldNotReachHere();
2286   }
2287 }
2288 
2289 
2290 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2291   if (dest->is_single_cpu()) {
2292     // first move left into dest so that left is not destroyed by the shift
2293     Register value = dest->as_register();
2294     count = count & 0x1F; // Java spec
2295 
2296     move_regs(left->as_register(), value);
2297     switch (code) {
2298       case lir_shl:  __ shll(value, count); break;
2299       case lir_shr:  __ sarl(value, count); break;
2300       case lir_ushr: __ shrl(value, count); break;
2301       default: ShouldNotReachHere();
2302     }
2303   } else if (dest->is_double_cpu()) {
2304     // first move left into dest so that left is not destroyed by the shift
2305     Register value = dest->as_register_lo();
    count = count & 0x3F; // Java spec: long shifts use the low six bits of the count
2307 
2308     move_regs(left->as_register_lo(), value);
2309     switch (code) {
2310       case lir_shl:  __ shlptr(value, count); break;
2311       case lir_shr:  __ sarptr(value, count); break;
2312       case lir_ushr: __ shrptr(value, count); break;
2313       default: ShouldNotReachHere();
2314     }
2315   } else {
2316     ShouldNotReachHere();
2317   }
2318 }
2319 
2320 
2321 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2322   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2323   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2324   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2325   __ movptr (Address(rsp, offset_from_rsp_in_bytes), r);
2326 }
2327 
2328 
2329 void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2330   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2331   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2332   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2333   __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
2334 }
2335 
2336 
2337 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
2338   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2339   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2340   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2341   __ movoop(Address(rsp, offset_from_rsp_in_bytes), o, rscratch1);
2342 }
2343 
2344 
2345 void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) {
2346   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2347   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2348   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2349   __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m, rscratch1);
2350 }
2351 
2352 
// This code replaces a call to arraycopy; no exceptions may be thrown
// in this code, they must be thrown in the System.arraycopy
// activation frame; we could save some checks if this were not the case
2356 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2357   ciArrayKlass* default_type = op->expected_type();
2358   Register src = op->src()->as_register();
2359   Register dst = op->dst()->as_register();
2360   Register src_pos = op->src_pos()->as_register();
2361   Register dst_pos = op->dst_pos()->as_register();
2362   Register length  = op->length()->as_register();
2363   Register tmp = op->tmp()->as_register();
2364   Register tmp_load_klass = rscratch1;
2365   Register tmp2 = UseCompactObjectHeaders ? rscratch2 : noreg;
2366 
2367   CodeStub* stub = op->stub();
2368   int flags = op->flags();
2369   BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
2370   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2371 
2372   // if we don't know anything, just go through the generic arraycopy
2373   if (default_type == nullptr) {
2374     // save outgoing arguments on stack in case call to System.arraycopy is needed
2375     // HACK ALERT. This code used to push the parameters in a hardwired fashion
2376     // for interpreter calling conventions. Now we have to do it in new style conventions.
2377     // For the moment until C1 gets the new register allocator I just force all the
2378     // args to the right place (except the register args) and then on the back side
2379     // reload the register args properly if we go slow path. Yuck
2380 
2381     // These are proper for the calling convention
2382     store_parameter(length, 2);
2383     store_parameter(dst_pos, 1);
2384     store_parameter(dst, 0);
2385 
2386     // these are just temporary placements until we need to reload
2387     store_parameter(src_pos, 3);
2388     store_parameter(src, 4);
2389 
2390     address copyfunc_addr = StubRoutines::generic_arraycopy();
2391     assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2392 
    // pass arguments: may push as this is not a safepoint; SP must be fixed at each safepoint
2394     // The arguments are in java calling convention so we can trivially shift them to C
2395     // convention
2396     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2397     __ mov(c_rarg0, j_rarg0);
2398     assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2399     __ mov(c_rarg1, j_rarg1);
2400     assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
2401     __ mov(c_rarg2, j_rarg2);
2402     assert_different_registers(c_rarg3, j_rarg4);
2403     __ mov(c_rarg3, j_rarg3);
2404 #ifdef _WIN64
2405     // Allocate abi space for args but be sure to keep stack aligned
2406     __ subptr(rsp, 6*wordSize);
2407     store_parameter(j_rarg4, 4);
2408 #ifndef PRODUCT
2409     if (PrintC1Statistics) {
2410       __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1);
2411     }
2412 #endif
2413     __ call(RuntimeAddress(copyfunc_addr));
2414     __ addptr(rsp, 6*wordSize);
2415 #else
2416     __ mov(c_rarg4, j_rarg4);
2417 #ifndef PRODUCT
2418     if (PrintC1Statistics) {
2419       __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1);
2420     }
2421 #endif
2422     __ call(RuntimeAddress(copyfunc_addr));
2423 #endif // _WIN64
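    // rax == 0 means the whole copy succeeded; otherwise rax == -1 ^ K,
    // where K is the number of elements already copied, so ~rax below
    // recovers K to advance the positions and shrink the length.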
2424 
2425     __ testl(rax, rax);
2426     __ jcc(Assembler::equal, *stub->continuation());
2427 
2428     __ mov(tmp, rax);
2429     __ xorl(tmp, -1);
2430 
2431     // Reload values from the stack so they are where the stub
2432     // expects them.
2433     __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
2434     __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
2435     __ movptr   (length,  Address(rsp, 2*BytesPerWord));
2436     __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
2437     __ movptr   (src,     Address(rsp, 4*BytesPerWord));
2438 
2439     __ subl(length, tmp);
2440     __ addl(src_pos, tmp);
2441     __ addl(dst_pos, tmp);
2442     __ jmp(*stub->entry());
2443 
2444     __ bind(*stub->continuation());
2445     return;
2446   }
2447 
2448   assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2449 
2450   int elem_size = type2aelembytes(basic_type);
2451   Address::ScaleFactor scale;
2452 
2453   switch (elem_size) {
2454     case 1 :
2455       scale = Address::times_1;
2456       break;
2457     case 2 :
2458       scale = Address::times_2;
2459       break;
2460     case 4 :
2461       scale = Address::times_4;
2462       break;
2463     case 8 :
2464       scale = Address::times_8;
2465       break;
2466     default:
2467       scale = Address::no_scale;
2468       ShouldNotReachHere();
2469   }
2470 
2471   Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2472   Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2473 
  // length and the positions are all sign-extended at this point on 64-bit
2475 
2476   // test for null
2477   if (flags & LIR_OpArrayCopy::src_null_check) {
2478     __ testptr(src, src);
2479     __ jcc(Assembler::zero, *stub->entry());
2480   }
2481   if (flags & LIR_OpArrayCopy::dst_null_check) {
2482     __ testptr(dst, dst);
2483     __ jcc(Assembler::zero, *stub->entry());
2484   }
2485 
2486   // If the compiler was not able to prove that exact type of the source or the destination
2487   // of the arraycopy is an array type, check at runtime if the source or the destination is
2488   // an instance type.
2489   if (flags & LIR_OpArrayCopy::type_check) {
2490     if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2491       __ load_klass(tmp, dst, tmp_load_klass);
2492       __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
2493       __ jcc(Assembler::greaterEqual, *stub->entry());
2494     }
2495 
2496     if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2497       __ load_klass(tmp, src, tmp_load_klass);
2498       __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
2499       __ jcc(Assembler::greaterEqual, *stub->entry());
2500     }
2501   }
2502 
2503   // check if negative
2504   if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
2505     __ testl(src_pos, src_pos);
2506     __ jcc(Assembler::less, *stub->entry());
2507   }
2508   if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
2509     __ testl(dst_pos, dst_pos);
2510     __ jcc(Assembler::less, *stub->entry());
2511   }
2512 
2513   if (flags & LIR_OpArrayCopy::src_range_check) {
2514     __ lea(tmp, Address(src_pos, length, Address::times_1, 0));
2515     __ cmpl(tmp, src_length_addr);
2516     __ jcc(Assembler::above, *stub->entry());
2517   }
2518   if (flags & LIR_OpArrayCopy::dst_range_check) {
2519     __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
2520     __ cmpl(tmp, dst_length_addr);
2521     __ jcc(Assembler::above, *stub->entry());
2522   }
2523 
2524   if (flags & LIR_OpArrayCopy::length_positive_check) {
2525     __ testl(length, length);
2526     __ jcc(Assembler::less, *stub->entry());
2527   }
2528 
  __ movl2ptr(src_pos, src_pos); // upper 32 bits must be zero
  __ movl2ptr(dst_pos, dst_pos); // upper 32 bits must be zero
2531 
2532   if (flags & LIR_OpArrayCopy::type_check) {
2533     // We don't know the array types are compatible
2534     if (basic_type != T_OBJECT) {
2535       // Simple test for basic type arrays
2536       __ cmp_klasses_from_objects(src, dst, tmp, tmp2);
2537       __ jcc(Assembler::notEqual, *stub->entry());
2538     } else {
2539       // For object arrays, if src is a sub class of dst then we can
2540       // safely do the copy.
2541       Label cont, slow;
2542 
2543       __ push_ppx(src);
2544       __ push_ppx(dst);
2545 
2546       __ load_klass(src, src, tmp_load_klass);
2547       __ load_klass(dst, dst, tmp_load_klass);
2548 
2549       __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr);
2550 
2551       __ push_ppx(src);
2552       __ push_ppx(dst);
2553       __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
2554       __ pop_ppx(dst);
2555       __ pop_ppx(src);
2556 
2557       __ testl(src, src);
2558       __ jcc(Assembler::notEqual, cont);
2559 
2560       __ bind(slow);
2561       __ pop_ppx(dst);
2562       __ pop_ppx(src);
2563 
2564       address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2565       if (copyfunc_addr != nullptr) { // use stub if available
2566         // src is not a sub class of dst so we have to do a
2567         // per-element check.
2568 
2569         int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2570         if ((flags & mask) != mask) {
          // Check that both of them are object arrays.
2572           assert(flags & mask, "one of the two should be known to be an object array");
2573 
2574           if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2575             __ load_klass(tmp, src, tmp_load_klass);
2576           } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2577             __ load_klass(tmp, dst, tmp_load_klass);
2578           }
2579           int lh_offset = in_bytes(Klass::layout_helper_offset());
2580           Address klass_lh_addr(tmp, lh_offset);
2581           jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2582           __ cmpl(klass_lh_addr, objArray_lh);
2583           __ jcc(Assembler::notEqual, *stub->entry());
2584         }
2585 
        // Spill because stubs can use any register they like and it's
        // easier to restore just those that we care about.
        store_parameter(dst, 0);
        store_parameter(dst_pos, 1);
        store_parameter(length, 2);
        store_parameter(src_pos, 3);
        store_parameter(src, 4);
2593 
        __ movl2ptr(length, length); // upper 32 bits must be zero
2595 
2596         __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
2597         assert_different_registers(c_rarg0, dst, dst_pos, length);
2598         __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
2599         assert_different_registers(c_rarg1, dst, length);
2600 
2601         __ mov(c_rarg2, length);
2602         assert_different_registers(c_rarg2, dst);
2603 
2604 #ifdef _WIN64
2605         // Allocate abi space for args but be sure to keep stack aligned
2606         __ subptr(rsp, 6*wordSize);
2607         __ load_klass(c_rarg3, dst, tmp_load_klass);
2608         __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset()));
2609         store_parameter(c_rarg3, 4);
2610         __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));
2611         __ call(RuntimeAddress(copyfunc_addr));
2612         __ addptr(rsp, 6*wordSize);
2613 #else
2614         __ load_klass(c_rarg4, dst, tmp_load_klass);
2615         __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
2616         __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
2617         __ call(RuntimeAddress(copyfunc_addr));
2618 #endif
2619 
2620 #ifndef PRODUCT
2621         if (PrintC1Statistics) {
2622           Label failed;
2623           __ testl(rax, rax);
2624           __ jcc(Assembler::notZero, failed);
2625           __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt), rscratch1);
2626           __ bind(failed);
2627         }
2628 #endif
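        // rax == 0 means every element was copied; otherwise rax ==
        // -1 ^ K, where K is the number of elements copied before the
        // type check failed.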
2629 
2630         __ testl(rax, rax);
2631         __ jcc(Assembler::zero, *stub->continuation());
2632 
2633 #ifndef PRODUCT
2634         if (PrintC1Statistics) {
2635           __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt), rscratch1);
2636         }
2637 #endif
2638 
2639         __ mov(tmp, rax);
2640 
2641         __ xorl(tmp, -1);
2642 
2643         // Restore previously spilled arguments
2644         __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
2645         __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
2646         __ movptr   (length,  Address(rsp, 2*BytesPerWord));
2647         __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
        __ movptr   (src,     Address(rsp, 4*BytesPerWord));

2651         __ subl(length, tmp);
2652         __ addl(src_pos, tmp);
2653         __ addl(dst_pos, tmp);
2654       }
2655 
2656       __ jmp(*stub->entry());
2657 
2658       __ bind(cont);
2659       __ pop(dst);
2660       __ pop(src);
2661     }
2662   }
2663 
2664 #ifdef ASSERT
2665   if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2666     // Sanity check the known type with the incoming class.  For the
2667     // primitive case the types must match exactly with src.klass and
2668     // dst.klass each exactly matching the default type.  For the
2669     // object array case, if no type check is needed then either the
2670     // dst type is exactly the expected type and the src type is a
2671     // subtype which we can't check or src is the same array as dst
2672     // but not necessarily exactly of type default_type.
2673     Label known_ok, halt;
2674     __ mov_metadata(tmp, default_type->constant_encoding());
2675     if (UseCompressedClassPointers) {
2676       __ encode_klass_not_null(tmp, rscratch1);
2677     }
2678 
2679     if (basic_type != T_OBJECT) {
2680       __ cmp_klass(tmp, dst, tmp2);
2681       __ jcc(Assembler::notEqual, halt);
2682       __ cmp_klass(tmp, src, tmp2);
2683       __ jcc(Assembler::equal, known_ok);
2684     } else {
2685       __ cmp_klass(tmp, dst, tmp2);
2686       __ jcc(Assembler::equal, known_ok);
2687       __ cmpptr(src, dst);
2688       __ jcc(Assembler::equal, known_ok);
2689     }
2690     __ bind(halt);
2691     __ stop("incorrect type information in arraycopy");
2692     __ bind(known_ok);
2693   }
2694 #endif
2695 
2696 #ifndef PRODUCT
2697   if (PrintC1Statistics) {
2698     __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)), rscratch1);
2699   }
2700 #endif
2701 
2702   assert_different_registers(c_rarg0, dst, dst_pos, length);
2703   __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
2704   assert_different_registers(c_rarg1, length);
2705   __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
2706   __ mov(c_rarg2, length);
2707 
2708   bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2709   bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
2710   const char *name;
2711   address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2712   __ call_VM_leaf(entry, 0);
2713 
2714   if (stub != nullptr) {
2715     __ bind(*stub->continuation());
2716   }
2717 }
2718 
2719 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
2720   assert(op->crc()->is_single_cpu(),  "crc must be register");
2721   assert(op->val()->is_single_cpu(),  "byte value must be register");
2722   assert(op->result_opr()->is_single_cpu(), "result must be register");
2723   Register crc = op->crc()->as_register();
2724   Register val = op->val()->as_register();
2725   Register res = op->result_opr()->as_register();
2726 
2727   assert_different_registers(val, crc, res);
2728 
2729   __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));
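  // CRC-32 keeps its running value bit-inverted between updates, hence
  // the complement before and after the table-driven byte update.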
2730   __ notl(crc); // ~crc
2731   __ update_byte_crc32(crc, val, res);
2732   __ notl(crc); // ~crc
2733   __ mov(res, crc);
2734 }
2735 
2736 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2737   Register obj = op->obj_opr()->as_register();  // may not be an oop
2738   Register hdr = op->hdr_opr()->as_register();
2739   Register lock = op->lock_opr()->as_register();
2740   if (op->code() == lir_lock) {
2741     Register tmp = op->scratch_opr()->as_register();
2742     // add debug info for NullPointerException only if one is possible
2743     int null_check_offset = __ lock_object(hdr, obj, lock, tmp, *op->stub()->entry());
2744     if (op->info() != nullptr) {
2745       add_debug_info_for_null_check(null_check_offset, op->info());
2746     }
2747     // done
2748   } else if (op->code() == lir_unlock) {
2749     __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2750   } else {
2751     Unimplemented();
2752   }
2753   __ bind(*op->stub()->continuation());
2754 }
2755 
2756 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
2757   Register obj = op->obj()->as_pointer_register();
2758   Register result = op->result_opr()->as_pointer_register();
2759 
2760   CodeEmitInfo* info = op->info();
2761   if (info != nullptr) {
2762     add_debug_info_for_null_check_here(info);
2763   }
2764 
2765   __ load_klass(result, obj, rscratch1);
2766 }
2767 
2768 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2769   ciMethod* method = op->profiled_method();
2770   int bci          = op->profiled_bci();
2771   ciMethod* callee = op->profiled_callee();
2772   Register tmp_load_klass = rscratch1;
2773 
2774   // Update counter for all call types
2775   ciMethodData* md = method->method_data_or_null();
2776   assert(md != nullptr, "Sanity");
2777   ciProfileData* data = md->bci_to_data(bci);
2778   assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
2779   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
2780   Register mdo  = op->mdo()->as_register();
2781   __ mov_metadata(mdo, md->constant_encoding());
2782   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
2783   // Perform additional virtual call profiling for invokevirtual and
2784   // invokeinterface bytecodes
2785   if (op->should_profile_receiver_type()) {
2786     assert(op->recv()->is_single_cpu(), "recv must be allocated");
2787     Register recv = op->recv()->as_register();
2788     assert_different_registers(mdo, recv);
2789     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
2790     ciKlass* known_klass = op->known_holder();
2791     if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
2792       // We know the type that will be seen at this call site; we can
2793       // statically update the MethodData* rather than needing to do
2794       // dynamic tests on the receiver type
2795 
2796       // NOTE: we should probably put a lock around this search to
2797       // avoid collisions by concurrent compilations
2798       ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
2799       uint i;
2800       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2801         ciKlass* receiver = vc_data->receiver(i);
2802         if (known_klass->equals(receiver)) {
2803           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
2804           __ addptr(data_addr, DataLayout::counter_increment);
2805           return;
2806         }
2807       }
2808 
2809       // Receiver type not found in profile data; select an empty slot
2810 
2811       // Note that this is less efficient than it should be because it
2812       // always does a write to the receiver part of the
2813       // VirtualCallData rather than just the first time
2814       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2815         ciKlass* receiver = vc_data->receiver(i);
2816         if (receiver == nullptr) {
2817           Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
2818           __ mov_metadata(recv_addr, known_klass->constant_encoding(), rscratch1);
2819           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
2820           __ addptr(data_addr, DataLayout::counter_increment);
2821           return;
2822         }
2823       }
2824     } else {
2825       __ load_klass(recv, recv, tmp_load_klass);
2826       Label update_done;
2827       type_profile_helper(mdo, md, data, recv, &update_done);
2828       // Receiver did not match any saved receiver and there is no empty row for it.
2829       // Increment total counter to indicate polymorphic case.
2830       __ addptr(counter_addr, DataLayout::counter_increment);
2831 
2832       __ bind(update_done);
2833     }
2834   } else {
2835     // Static call
2836     __ addptr(counter_addr, DataLayout::counter_increment);
2837   }
2838 }
2839 
2840 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
2841   Register obj = op->obj()->as_register();
2842   Register tmp = op->tmp()->as_pointer_register();
2843   Register tmp_load_klass = rscratch1;
2844   Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
2845   ciKlass* exact_klass = op->exact_klass();
2846   intptr_t current_klass = op->current_klass();
2847   bool not_null = op->not_null();
2848   bool no_conflict = op->no_conflict();
2849 
2850   Label update, next, none;
2851 
2852   bool do_null = !not_null;
2853   bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
2854   bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
2855 
2856   assert(do_null || do_update, "why are we here?");
2857   assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
2858 
2859   __ verify_oop(obj);
2860 
2861 #ifdef ASSERT
2862   if (obj == tmp) {
2863     assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index());
2864   } else {
2865     assert_different_registers(obj, tmp, rscratch1, mdo_addr.base(), mdo_addr.index());
2866   }
2867 #endif
  if (do_null) {
    __ testptr(obj, obj);
    __ jccb(Assembler::notZero, update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ testptr(mdo_addr, TypeEntries::null_seen);
#ifndef ASSERT
      __ jccb(Assembler::notZero, next); // already set
#else
      __ jcc(Assembler::notZero, next); // already set
#endif
      // atomic update to prevent overwriting Klass* with 0
      __ lock();
      __ orptr(mdo_addr, TypeEntries::null_seen);
    }
    if (do_update) {
#ifndef ASSERT
      __ jmpb(next);
    }
#else
      __ jmp(next);
    }
  } else {
    __ testptr(obj, obj);
    __ jcc(Assembler::notZero, update);
    __ stop("unexpected null obj");
#endif
  }
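  // Note on the #ifdef ASSERT split above: debug builds emit extra code
  // (e.g. the stop() on an unexpected null), which can push branch targets
  // beyond the +/-127 byte reach of the short jccb/jmpb encodings, so the
  // long-form jcc/jmp must be used there instead.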

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != nullptr) {
      Label ok;
      __ load_klass(tmp, obj, tmp_load_klass);
      __ push_ppx(tmp);
      __ mov_metadata(tmp, exact_klass->constant_encoding());
      __ cmpptr(tmp, Address(rsp, 0));
      __ jcc(Assembler::equal, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
      __ pop_ppx(tmp);
    }
#endif
    if (!no_conflict) {
      if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != nullptr) {
          __ mov_metadata(tmp, exact_klass->constant_encoding());
        } else {
          __ load_klass(tmp, obj, tmp_load_klass);
        }
        __ mov(rscratch1, tmp); // save original value before XOR
        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
        // klass seen before, nothing to do. The unknown bit may have been
        // set already but no need to check.
        __ jccb(Assembler::zero, next);

        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        if (TypeEntries::is_type_none(current_klass)) {
          __ testptr(mdo_addr, TypeEntries::type_mask);
          __ jccb(Assembler::zero, none);
          // The checks above re-read the profile data from memory, so they
          // can fail if another thread has just recorded this obj's klass in
          // the profile: re-check against the saved klass before giving up.
          __ mov(tmp, rscratch1); // get back original value before XOR
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_klass_mask);
          __ jccb(Assembler::zero, next);
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ testptr(mdo_addr, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
      }

      // Different klass from before: cannot keep an accurate profile.
      __ orptr(mdo_addr, TypeEntries::type_unknown);

      if (TypeEntries::is_type_none(current_klass)) {
        __ jmpb(next);

        __ bind(none);
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
#ifdef ASSERT
        __ andptr(tmp, TypeEntries::type_klass_mask);
        __ verify_klass_ptr(tmp);
#endif
      }
    } else {
      // There's a single possible klass at this profile point
      assert(exact_klass != nullptr, "should be");
      if (TypeEntries::is_type_none(current_klass)) {
        __ mov_metadata(tmp, exact_klass->constant_encoding());
        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
#ifdef ASSERT
        __ jcc(Assembler::zero, next);

        {
          Label ok;
          __ push_ppx(tmp);
          __ testptr(mdo_addr, TypeEntries::type_mask);
          __ jcc(Assembler::zero, ok);
          // may have been set by another thread
          __ mov_metadata(tmp, exact_klass->constant_encoding());
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_mask);
          __ jcc(Assembler::zero, ok);

          __ stop("unexpected profiling mismatch");
          __ bind(ok);
          __ pop_ppx(tmp);
        }
#else
        __ jccb(Assembler::zero, next);
#endif
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
#ifdef ASSERT
        __ andptr(tmp, TypeEntries::type_klass_mask);
        __ verify_klass_ptr(tmp);
#endif
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        __ testptr(mdo_addr, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        __ orptr(mdo_addr, TypeEntries::type_unknown);
      }
    }
  }
  __ bind(next);
}

void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
}

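// Backward branches are almost always loop back-edges; aligning their targets
// keeps the loop header well placed for instruction fetch.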
void LIR_Assembler::align_backward_branch_target() {
  __ align(BytesPerWord);
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
  if (left->is_single_cpu()) {
    __ negl(left->as_register());
    move_regs(left->as_register(), dest->as_register());

  } else if (left->is_double_cpu()) {
    Register lo = left->as_register_lo();
    Register dst = dest->as_register_lo();
    __ movptr(dst, lo);
    __ negptr(dst);

  } else if (dest->is_single_xmm()) {
    assert(!tmp->is_valid(), "do not need temporary");
    if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
      __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
    }
    __ xorps(dest->as_xmm_float_reg(),
             ExternalAddress((address)float_signflip_pool),
             rscratch1);
  } else if (dest->is_double_xmm()) {
    assert(!tmp->is_valid(), "do not need temporary");
    if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
      __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
    }
    __ xorpd(dest->as_xmm_double_reg(),
             ExternalAddress((address)double_signflip_pool),
             rscratch1);
  } else {
    ShouldNotReachHere();
  }
}
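
// The XMM paths above use the IEEE-754 sign-bit trick: xorps/xorpd against a
// sign-flip mask toggles only the sign bit, which negates the value exactly
// (including -0.0 and NaN payloads). Illustration only:
//
//   -x == bit_cast<float>(bit_cast<uint32_t>(x) ^ 0x80000000u)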


void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_address(), "must be an address");
  assert(dest->is_register(), "must be a register");

  PatchingStub* patch = nullptr;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }

  Register reg = dest->as_pointer_register();
  LIR_Address* addr = src->as_address_ptr();
  __ lea(reg, as_Address(addr));

  if (patch != nullptr) {
    patching_epilog(patch, patch_code, addr->base()->as_register(), info);
  }
}


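// Call straight into the C runtime. post_call_nop() tags the return address
// so the stack walker can recognize this call site (this marker is what, for
// example, the continuation/frame-walking machinery keys off of).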
void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");
  __ call(RuntimeAddress(dest));
  if (info != nullptr) {
    add_call_info_here(info);
  }
  __ post_call_nop();
}

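// Volatile long fields must be read/written as a single atomic 8-byte memory
// access (JLS 17.7). Going through an XMM register guarantees that; this is
// chiefly a holdover from 32-bit x86, where a pair of 32-bit moves would not
// be atomic.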
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  assert(type == T_LONG, "only for volatile long fields");

  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }

  if (src->is_double_xmm()) {
    if (dest->is_double_cpu()) {
      __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
    } else if (dest->is_double_stack()) {
      __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());
    } else if (dest->is_address()) {
      __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg());
    } else {
      ShouldNotReachHere();
    }

  } else if (dest->is_double_xmm()) {
    if (src->is_double_stack()) {
      __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}

#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    Assembler::Condition acond = Assembler::zero;
    switch (op->condition()) {
      case lir_cond_equal:        acond = Assembler::equal;        break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;     break;
      case lir_cond_less:         acond = Assembler::less;         break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;    break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
      case lir_cond_greater:      acond = Assembler::greater;      break;
      case lir_cond_belowEqual:   acond = Assembler::belowEqual;   break;
      case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   break;
      default:                    ShouldNotReachHere();
    }
    __ jcc(acond, ok);
  }
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif

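// Memory barriers. x86 is TSO (total store order): the only hardware
// reordering is a store followed by a load from a different location, so only
// StoreLoad needs a real fence (a locked instruction or mfence). The other
// barrier flavors are purely compiler barriers and emit nothing.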
void LIR_Assembler::membar() {
  // A full membar only needs to order StoreLoad; TSO already orders the rest.
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::membar_acquire() {
  // No x86 machines currently require load fences
}

void LIR_Assembler::membar_release() {
  // No x86 machines currently require store fences
}

void LIR_Assembler::membar_loadload() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::LoadLoad));
}

void LIR_Assembler::membar_storestore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::StoreStore));
}

void LIR_Assembler::membar_loadstore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::LoadStore));
}

void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

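// pause hints to the CPU that this is a spin-wait loop; it saves power and
// avoids the memory-order mis-speculation penalty when the loop exits.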
void LIR_Assembler::on_spin_wait() {
  __ pause();
}

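// On x86_64, HotSpot reserves r15 for the current JavaThread, so fetching the
// thread is a plain register-to-register move.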
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
  __ mov(result_reg->as_register(), r15_thread);
}


void LIR_Assembler::peephole(LIR_List*) {
  // do nothing for now
}

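// Atomic exchange / fetch-and-add. Note the asymmetry: xadd needs an explicit
// lock prefix, while xchg with a memory operand is implicitly locked by the
// hardware, so no lock() is emitted on the xchg paths. With compressed oops
// the oop is narrowed first so a 32-bit xchgl can be used, then re-widened.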
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  assert(data == dest, "xchg/xadd uses only 2 operands");

  if (data->type() == T_INT) {
    if (code == lir_xadd) {
      __ lock();
      __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
    } else {
      __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
    }
  } else if (data->is_oop()) {
    assert(code == lir_xchg, "xadd is not supported for oops");
    Register obj = data->as_register();
    if (UseCompressedOops) {
      __ encode_heap_oop(obj);
      __ xchgl(obj, as_Address(src->as_address_ptr()));
      __ decode_heap_oop(obj);
    } else {
      __ xchgptr(obj, as_Address(src->as_address_ptr()));
    }
  } else if (data->type() == T_LONG) {
    assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
    if (code == lir_xadd) {
      __ lock();
      __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
    } else {
      __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
    }
  } else {
    ShouldNotReachHere();
  }
}

#undef __