1 /*
   2  * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/macroAssembler.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "c1/c1_CodeStubs.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_MacroAssembler.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "compiler/oopMap.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/gc_globals.hpp"
  38 #include "nativeInst_x86.hpp"
  39 #include "oops/objArrayKlass.hpp"
  40 #include "runtime/frame.inline.hpp"
  41 #include "runtime/safepointMechanism.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "runtime/stubRoutines.hpp"
  44 #include "utilities/powerOfTwo.hpp"
  45 #include "vmreg_x86.inline.hpp"
  46 
  47 
  48 // These masks are used to provide 128-bit aligned bitmasks to the XMM
  49 // instructions, to allow sign-masking or sign-bit flipping.  They allow
  50 // fast versions of NegF/NegD and AbsF/AbsD.
  51 
  52 // Note: 'double' and 'long long' have 32-bits alignment on x86.
  53 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  54   // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
  55   // of 128-bits operands for SSE instructions.
  56   jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  57   // Store the value to a 128-bits operand.
  58   operand[0] = lo;
  59   operand[1] = hi;
  60   return operand;
  61 }
  62 
  63 // Buffer for 128-bits masks used by SSE instructions.
  64 static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)
  65 
  66 // Static initialization during VM startup.
  67 static jlong *float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2],         CONST64(0x7FFFFFFF7FFFFFFF),         CONST64(0x7FFFFFFF7FFFFFFF));
  68 static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2],         CONST64(0x7FFFFFFFFFFFFFFF),         CONST64(0x7FFFFFFFFFFFFFFF));
  69 static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
  70 static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
  71 
  72 
  73 NEEDS_CLEANUP // remove this definitions ?
  74 const Register SYNC_header = rax;   // synchronization header
  75 const Register SHIFT_count = rcx;   // where count for shift operations must be
  76 
  77 #define __ _masm->
  78 
  79 
  80 static void select_different_registers(Register preserve,
  81                                        Register extra,
  82                                        Register &tmp1,
  83                                        Register &tmp2) {
  84   if (tmp1 == preserve) {
  85     assert_different_registers(tmp1, tmp2, extra);
  86     tmp1 = extra;
  87   } else if (tmp2 == preserve) {
  88     assert_different_registers(tmp1, tmp2, extra);
  89     tmp2 = extra;
  90   }
  91   assert_different_registers(preserve, tmp1, tmp2);
  92 }
  93 
  94 
  95 
  96 static void select_different_registers(Register preserve,
  97                                        Register extra,
  98                                        Register &tmp1,
  99                                        Register &tmp2,
 100                                        Register &tmp3) {
 101   if (tmp1 == preserve) {
 102     assert_different_registers(tmp1, tmp2, tmp3, extra);
 103     tmp1 = extra;
 104   } else if (tmp2 == preserve) {
 105     assert_different_registers(tmp1, tmp2, tmp3, extra);
 106     tmp2 = extra;
 107   } else if (tmp3 == preserve) {
 108     assert_different_registers(tmp1, tmp2, tmp3, extra);
 109     tmp3 = extra;
 110   }
 111   assert_different_registers(preserve, tmp1, tmp2, tmp3);
 112 }
 113 
 114 
 115 
 116 bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
 117   if (opr->is_constant()) {
 118     LIR_Const* constant = opr->as_constant_ptr();
 119     switch (constant->type()) {
 120       case T_INT: {
 121         return true;
 122       }
 123 
 124       default:
 125         return false;
 126     }
 127   }
 128   return false;
 129 }
 130 
 131 
 132 LIR_Opr LIR_Assembler::receiverOpr() {
 133   return FrameMap::receiver_opr;
 134 }
 135 
 136 LIR_Opr LIR_Assembler::osrBufferPointer() {
 137   return FrameMap::as_pointer_opr(receiverOpr()->as_register());
 138 }
 139 
 140 //--------------fpu register translations-----------------------
 141 
 142 
 143 address LIR_Assembler::float_constant(float f) {
 144   address const_addr = __ float_constant(f);
 145   if (const_addr == nullptr) {
 146     bailout("const section overflow");
 147     return __ code()->consts()->start();
 148   } else {
 149     return const_addr;
 150   }
 151 }
 152 
 153 
 154 address LIR_Assembler::double_constant(double d) {
 155   address const_addr = __ double_constant(d);
 156   if (const_addr == nullptr) {
 157     bailout("const section overflow");
 158     return __ code()->consts()->start();
 159   } else {
 160     return const_addr;
 161   }
 162 }
 163 
 164 void LIR_Assembler::breakpoint() {
 165   __ int3();
 166 }
 167 
 168 void LIR_Assembler::push(LIR_Opr opr) {
 169   if (opr->is_single_cpu()) {
 170     __ push_reg(opr->as_register());
 171   } else if (opr->is_double_cpu()) {
 172     __ push_reg(opr->as_register_lo());
 173   } else if (opr->is_stack()) {
 174     __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
 175   } else if (opr->is_constant()) {
 176     LIR_Const* const_opr = opr->as_constant_ptr();
 177     if (const_opr->type() == T_OBJECT) {
 178       __ push_oop(const_opr->as_jobject(), rscratch1);
 179     } else if (const_opr->type() == T_INT) {
 180       __ push_jint(const_opr->as_jint());
 181     } else {
 182       ShouldNotReachHere();
 183     }
 184 
 185   } else {
 186     ShouldNotReachHere();
 187   }
 188 }
 189 
 190 void LIR_Assembler::pop(LIR_Opr opr) {
 191   if (opr->is_single_cpu()) {
 192     __ pop_reg(opr->as_register());
 193   } else {
 194     ShouldNotReachHere();
 195   }
 196 }
 197 
 198 bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
 199   return addr->base()->is_illegal() && addr->index()->is_illegal();
 200 }
 201 
 202 //-------------------------------------------
 203 
 204 Address LIR_Assembler::as_Address(LIR_Address* addr) {
 205   return as_Address(addr, rscratch1);
 206 }
 207 
 208 Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
 209   if (addr->base()->is_illegal()) {
 210     assert(addr->index()->is_illegal(), "must be illegal too");
 211     AddressLiteral laddr((address)addr->disp(), relocInfo::none);
 212     if (! __ reachable(laddr)) {
 213       __ movptr(tmp, laddr.addr());
 214       Address res(tmp, 0);
 215       return res;
 216     } else {
 217       return __ as_Address(laddr);
 218     }
 219   }
 220 
 221   Register base = addr->base()->as_pointer_register();
 222 
 223   if (addr->index()->is_illegal()) {
 224     return Address( base, addr->disp());
 225   } else if (addr->index()->is_cpu_register()) {
 226     Register index = addr->index()->as_pointer_register();
 227     return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());
 228   } else if (addr->index()->is_constant()) {
 229     intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
 230     assert(Assembler::is_simm32(addr_offset), "must be");
 231 
 232     return Address(base, addr_offset);
 233   } else {
 234     Unimplemented();
 235     return Address();
 236   }
 237 }
 238 
 239 
 240 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
 241   Address base = as_Address(addr);
 242   return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);
 243 }
 244 
 245 
 246 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
 247   return as_Address(addr);
 248 }
 249 
 250 
 251 void LIR_Assembler::osr_entry() {
 252   offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
 253   BlockBegin* osr_entry = compilation()->hir()->osr_entry();
 254   ValueStack* entry_state = osr_entry->state();
 255   int number_of_locks = entry_state->locks_size();
 256 
 257   // we jump here if osr happens with the interpreter
 258   // state set up to continue at the beginning of the
 259   // loop that triggered osr - in particular, we have
 260   // the following registers setup:
 261   //
 262   // rcx: osr buffer
 263   //
 264 
 265   // build frame
 266   ciMethod* m = compilation()->method();
 267   __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
 268 
 269   // OSR buffer is
 270   //
 271   // locals[nlocals-1..0]
 272   // monitors[0..number_of_locks]
 273   //
 274   // locals is a direct copy of the interpreter frame so in the osr buffer
 275   // so first slot in the local array is the last local from the interpreter
 276   // and last slot is local[0] (receiver) from the interpreter
 277   //
 278   // Similarly with locks. The first lock slot in the osr buffer is the nth lock
 279   // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
 280   // in the interpreter frame (the method lock if a sync method)
 281 
 282   // Initialize monitors in the compiled activation.
 283   //   rcx: pointer to osr buffer
 284   //
 285   // All other registers are dead at this point and the locals will be
 286   // copied into place by code emitted in the IR.
 287 
 288   Register OSR_buf = osrBufferPointer()->as_pointer_register();
 289   { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
 290     int monitor_offset = BytesPerWord * method()->max_locals() +
 291       (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
 292     // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
 293     // the OSR buffer using 2 word entries: first the lock and then
 294     // the oop.
 295     for (int i = 0; i < number_of_locks; i++) {
 296       int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
 297 #ifdef ASSERT
 298       // verify the interpreter's monitor has a non-null object
 299       {
 300         Label L;
 301         __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), NULL_WORD);
 302         __ jcc(Assembler::notZero, L);
 303         __ stop("locked object is null");
 304         __ bind(L);
 305       }
 306 #endif
 307       __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
 308       __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
 309       __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
 310       __ movptr(frame_map()->address_for_monitor_object(i), rbx);
 311     }
 312   }
 313 }
 314 
 315 
 316 // inline cache check; done before the frame is built.
 317 int LIR_Assembler::check_icache() {
 318   return __ ic_check(CodeEntryAlignment);
 319 }
 320 
 321 void LIR_Assembler::clinit_barrier(ciMethod* method) {
 322   assert(VM_Version::supports_fast_class_init_checks(), "sanity");
 323   assert(!method->holder()->is_not_initialized(), "initialization should have been started");
 324 
 325   Label L_skip_barrier;
 326   Register klass = rscratch1;
 327 
 328   __ mov_metadata(klass, method->holder()->constant_encoding());
 329   __ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);
 330 
 331   __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 332 
 333   __ bind(L_skip_barrier);
 334 }
 335 
 336 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
 337   jobject o = nullptr;
 338   PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
 339   __ movoop(reg, o);
 340   patching_epilog(patch, lir_patch_normal, reg, info);
 341 }
 342 
 343 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
 344   Metadata* o = nullptr;
 345   PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
 346   __ mov_metadata(reg, o);
 347   patching_epilog(patch, lir_patch_normal, reg, info);
 348 }
 349 
 350 // This specifies the rsp decrement needed to build the frame
 351 int LIR_Assembler::initial_frame_size_in_bytes() const {
 352   // if rounding, must let FrameMap know!
 353 
 354   // The frame_map records size in slots (32bit word)
 355 
 356   // subtract two words to account for return address and link
 357   return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word))  * VMRegImpl::stack_slot_size;
 358 }
 359 
 360 
 361 int LIR_Assembler::emit_exception_handler() {
 362   // generate code for exception handler
 363   address handler_base = __ start_a_stub(exception_handler_size());
 364   if (handler_base == nullptr) {
 365     // not enough space left for the handler
 366     bailout("exception handler overflow");
 367     return -1;
 368   }
 369 
 370   int offset = code_offset();
 371 
 372   // the exception oop and pc are in rax, and rdx
 373   // no other registers need to be preserved, so invalidate them
 374   __ invalidate_registers(false, true, true, false, true, true);
 375 
 376   // check that there is really an exception
 377   __ verify_not_null_oop(rax);
 378 
 379   // search an exception handler (rax: exception oop, rdx: throwing pc)
 380   __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_handle_exception_from_callee_id)));
 381   __ should_not_reach_here();
 382   guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
 383   __ end_a_stub();
 384 
 385   return offset;
 386 }
 387 
 388 
 389 // Emit the code to remove the frame from the stack in the exception
 390 // unwind path.
 391 int LIR_Assembler::emit_unwind_handler() {
 392 #ifndef PRODUCT
 393   if (CommentedAssembly) {
 394     _masm->block_comment("Unwind handler");
 395   }
 396 #endif
 397 
 398   int offset = code_offset();
 399 
 400   // Fetch the exception from TLS and clear out exception related thread state
 401   __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
 402   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), NULL_WORD);
 403   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), NULL_WORD);
 404 
 405   __ bind(_unwind_handler_entry);
 406   __ verify_not_null_oop(rax);
 407   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 408     __ mov(rbx, rax);  // Preserve the exception (rbx is always callee-saved)
 409   }
 410 
 411   // Perform needed unlocking
 412   MonitorExitStub* stub = nullptr;
 413   if (method()->is_synchronized()) {
 414     monitor_address(0, FrameMap::rax_opr);
 415     stub = new MonitorExitStub(FrameMap::rax_opr, 0);
 416     __ unlock_object(rdi, rsi, rax, *stub->entry());
 417     __ bind(*stub->continuation());
 418   }
 419 
 420   if (compilation()->env()->dtrace_method_probes()) {
 421     __ mov(rdi, r15_thread);
 422     __ mov_metadata(rsi, method()->constant_encoding());
 423     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
 424   }
 425 
 426   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 427     __ mov(rax, rbx);  // Restore the exception
 428   }
 429 
 430   // remove the activation and dispatch to the unwind handler
 431   __ remove_frame(initial_frame_size_in_bytes());
 432   __ jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_unwind_exception_id)));
 433 
 434   // Emit the slow path assembly
 435   if (stub != nullptr) {
 436     stub->emit_code(this);
 437   }
 438 
 439   return offset;
 440 }
 441 
 442 
 443 int LIR_Assembler::emit_deopt_handler() {
 444   // generate code for exception handler
 445   address handler_base = __ start_a_stub(deopt_handler_size());
 446   if (handler_base == nullptr) {
 447     // not enough space left for the handler
 448     bailout("deopt handler overflow");
 449     return -1;
 450   }
 451 
 452   int offset = code_offset();
 453 
 454   Label start;
 455   __ bind(start);
 456 
 457   __ call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 458 
 459   int entry_offset = __ offset();
 460 
 461   __ jmp(start);
 462 
 463   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 464   assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
 465          "out of bounds read in post-call NOP check");
 466   __ end_a_stub();
 467 
 468   return entry_offset;
 469 }
 470 
 471 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 472   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
 473   if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
 474     assert(result->fpu() == 0, "result must already be on TOS");
 475   }
 476 
 477   // Pop the stack before the safepoint code
 478   __ remove_frame(initial_frame_size_in_bytes());
 479 
 480   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 481     __ reserved_stack_check();
 482   }
 483 
 484   // Note: we do not need to round double result; float result has the right precision
 485   // the poll sets the condition code, but no data registers
 486 
 487   code_stub->set_safepoint_offset(__ offset());
 488   __ relocate(relocInfo::poll_return_type);
 489   __ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
 490   __ ret(0);
 491 }
 492 
 493 
 494 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 495   guarantee(info != nullptr, "Shouldn't be null");
 496   int offset = __ offset();
 497   const Register poll_addr = rscratch1;
 498   __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
 499   add_debug_info_for_branch(info);
 500   __ relocate(relocInfo::poll_type);
 501   address pre_pc = __ pc();
 502   __ testl(rax, Address(poll_addr, 0));
 503   address post_pc = __ pc();
 504   guarantee(pointer_delta(post_pc, pre_pc, 1) == 3, "must be exact length");
 505   return offset;
 506 }
 507 
 508 
 509 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
 510   if (from_reg != to_reg) __ mov(to_reg, from_reg);
 511 }
 512 
 513 void LIR_Assembler::swap_reg(Register a, Register b) {
 514   __ xchgptr(a, b);
 515 }
 516 
 517 
 518 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
 519   assert(src->is_constant(), "should not call otherwise");
 520   assert(dest->is_register(), "should not call otherwise");
 521   LIR_Const* c = src->as_constant_ptr();
 522 
 523   switch (c->type()) {
 524     case T_INT: {
 525       assert(patch_code == lir_patch_none, "no patching handled here");
 526       __ movl(dest->as_register(), c->as_jint());
 527       break;
 528     }
 529 
 530     case T_ADDRESS: {
 531       assert(patch_code == lir_patch_none, "no patching handled here");
 532       __ movptr(dest->as_register(), c->as_jint());
 533       break;
 534     }
 535 
 536     case T_LONG: {
 537       assert(patch_code == lir_patch_none, "no patching handled here");
 538       __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
 539       break;
 540     }
 541 
 542     case T_OBJECT: {
 543       if (patch_code != lir_patch_none) {
 544         jobject2reg_with_patching(dest->as_register(), info);
 545       } else {
 546         __ movoop(dest->as_register(), c->as_jobject());
 547       }
 548       break;
 549     }
 550 
 551     case T_METADATA: {
 552       if (patch_code != lir_patch_none) {
 553         klass2reg_with_patching(dest->as_register(), info);
 554       } else {
 555         __ mov_metadata(dest->as_register(), c->as_metadata());
 556       }
 557       break;
 558     }
 559 
 560     case T_FLOAT: {
 561       if (dest->is_single_xmm()) {
 562         if (UseAVX <= 2 && c->is_zero_float()) {
 563           __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
 564         } else {
 565           __ movflt(dest->as_xmm_float_reg(),
 566                    InternalAddress(float_constant(c->as_jfloat())));
 567         }
 568       } else {
 569         ShouldNotReachHere();
 570       }
 571       break;
 572     }
 573 
 574     case T_DOUBLE: {
 575       if (dest->is_double_xmm()) {
 576         if (UseAVX <= 2 && c->is_zero_double()) {
 577           __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
 578         } else {
 579           __ movdbl(dest->as_xmm_double_reg(),
 580                     InternalAddress(double_constant(c->as_jdouble())));
 581         }
 582       } else {
 583         ShouldNotReachHere();
 584       }
 585       break;
 586     }
 587 
 588     default:
 589       ShouldNotReachHere();
 590   }
 591 }
 592 
 593 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 594   assert(src->is_constant(), "should not call otherwise");
 595   assert(dest->is_stack(), "should not call otherwise");
 596   LIR_Const* c = src->as_constant_ptr();
 597 
 598   switch (c->type()) {
 599     case T_INT:  // fall through
 600     case T_FLOAT:
 601       __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
 602       break;
 603 
 604     case T_ADDRESS:
 605       __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
 606       break;
 607 
 608     case T_OBJECT:
 609       __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject(), rscratch1);
 610       break;
 611 
 612     case T_LONG:  // fall through
 613     case T_DOUBLE:
 614       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 615                                               lo_word_offset_in_bytes),
 616                 (intptr_t)c->as_jlong_bits(),
 617                 rscratch1);
 618       break;
 619 
 620     default:
 621       ShouldNotReachHere();
 622   }
 623 }
 624 
 625 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 626   assert(src->is_constant(), "should not call otherwise");
 627   assert(dest->is_address(), "should not call otherwise");
 628   LIR_Const* c = src->as_constant_ptr();
 629   LIR_Address* addr = dest->as_address_ptr();
 630 
 631   int null_check_here = code_offset();
 632   switch (type) {
 633     case T_INT:    // fall through
 634     case T_FLOAT:
 635       __ movl(as_Address(addr), c->as_jint_bits());
 636       break;
 637 
 638     case T_ADDRESS:
 639       __ movptr(as_Address(addr), c->as_jint_bits());
 640       break;
 641 
 642     case T_OBJECT:  // fall through
 643     case T_ARRAY:
 644       if (c->as_jobject() == nullptr) {
 645         if (UseCompressedOops && !wide) {
 646           __ movl(as_Address(addr), NULL_WORD);
 647         } else {
 648           __ xorptr(rscratch1, rscratch1);
 649           null_check_here = code_offset();
 650           __ movptr(as_Address(addr), rscratch1);
 651         }
 652       } else {
 653         if (is_literal_address(addr)) {
 654           ShouldNotReachHere();
 655           __ movoop(as_Address(addr, noreg), c->as_jobject(), rscratch1);
 656         } else {
 657           __ movoop(rscratch1, c->as_jobject());
 658           if (UseCompressedOops && !wide) {
 659             __ encode_heap_oop(rscratch1);
 660             null_check_here = code_offset();
 661             __ movl(as_Address_lo(addr), rscratch1);
 662           } else {
 663             null_check_here = code_offset();
 664             __ movptr(as_Address_lo(addr), rscratch1);
 665           }
 666         }
 667       }
 668       break;
 669 
 670     case T_LONG:    // fall through
 671     case T_DOUBLE:
 672       if (is_literal_address(addr)) {
 673         ShouldNotReachHere();
 674         __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
 675       } else {
 676         __ movptr(r10, (intptr_t)c->as_jlong_bits());
 677         null_check_here = code_offset();
 678         __ movptr(as_Address_lo(addr), r10);
 679       }
 680       break;
 681 
 682     case T_BOOLEAN: // fall through
 683     case T_BYTE:
 684       __ movb(as_Address(addr), c->as_jint() & 0xFF);
 685       break;
 686 
 687     case T_CHAR:    // fall through
 688     case T_SHORT:
 689       __ movw(as_Address(addr), c->as_jint() & 0xFFFF);
 690       break;
 691 
 692     default:
 693       ShouldNotReachHere();
 694   };
 695 
 696   if (info != nullptr) {
 697     add_debug_info_for_null_check(null_check_here, info);
 698   }
 699 }
 700 
 701 
 702 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 703   assert(src->is_register(), "should not call otherwise");
 704   assert(dest->is_register(), "should not call otherwise");
 705 
 706   // move between cpu-registers
 707   if (dest->is_single_cpu()) {
 708     if (src->type() == T_LONG) {
 709       // Can do LONG -> OBJECT
 710       move_regs(src->as_register_lo(), dest->as_register());
 711       return;
 712     }
 713     assert(src->is_single_cpu(), "must match");
 714     if (src->type() == T_OBJECT) {
 715       __ verify_oop(src->as_register());
 716     }
 717     move_regs(src->as_register(), dest->as_register());
 718 
 719   } else if (dest->is_double_cpu()) {
 720     if (is_reference_type(src->type())) {
 721       // Surprising to me but we can see move of a long to t_object
 722       __ verify_oop(src->as_register());
 723       move_regs(src->as_register(), dest->as_register_lo());
 724       return;
 725     }
 726     assert(src->is_double_cpu(), "must match");
 727     Register f_lo = src->as_register_lo();
 728     Register f_hi = src->as_register_hi();
 729     Register t_lo = dest->as_register_lo();
 730     Register t_hi = dest->as_register_hi();
 731     assert(f_hi == f_lo, "must be same");
 732     assert(t_hi == t_lo, "must be same");
 733     move_regs(f_lo, t_lo);
 734 
 735     // move between xmm-registers
 736   } else if (dest->is_single_xmm()) {
 737     assert(src->is_single_xmm(), "must match");
 738     __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
 739   } else if (dest->is_double_xmm()) {
 740     assert(src->is_double_xmm(), "must match");
 741     __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());
 742 
 743   } else {
 744     ShouldNotReachHere();
 745   }
 746 }
 747 
 748 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
 749   assert(src->is_register(), "should not call otherwise");
 750   assert(dest->is_stack(), "should not call otherwise");
 751 
 752   if (src->is_single_cpu()) {
 753     Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
 754     if (is_reference_type(type)) {
 755       __ verify_oop(src->as_register());
 756       __ movptr (dst, src->as_register());
 757     } else if (type == T_METADATA || type == T_ADDRESS) {
 758       __ movptr (dst, src->as_register());
 759     } else {
 760       __ movl (dst, src->as_register());
 761     }
 762 
 763   } else if (src->is_double_cpu()) {
 764     Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
 765     Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
 766     __ movptr (dstLO, src->as_register_lo());
 767 
 768   } else if (src->is_single_xmm()) {
 769     Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
 770     __ movflt(dst_addr, src->as_xmm_float_reg());
 771 
 772   } else if (src->is_double_xmm()) {
 773     Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
 774     __ movdbl(dst_addr, src->as_xmm_double_reg());
 775 
 776   } else {
 777     ShouldNotReachHere();
 778   }
 779 }
 780 
 781 
 782 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
 783   LIR_Address* to_addr = dest->as_address_ptr();
 784   PatchingStub* patch = nullptr;
 785   Register compressed_src = rscratch1;
 786 
 787   if (is_reference_type(type)) {
 788     __ verify_oop(src->as_register());
 789     if (UseCompressedOops && !wide) {
 790       __ movptr(compressed_src, src->as_register());
 791       __ encode_heap_oop(compressed_src);
 792       if (patch_code != lir_patch_none) {
 793         info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
 794       }
 795     }
 796   }
 797 
 798   if (patch_code != lir_patch_none) {
 799     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
 800     Address toa = as_Address(to_addr);
 801     assert(toa.disp() != 0, "must have");
 802   }
 803 
 804   int null_check_here = code_offset();
 805   switch (type) {
 806     case T_FLOAT: {
 807       assert(src->is_single_xmm(), "not a float");
 808       __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
 809       break;
 810     }
 811 
 812     case T_DOUBLE: {
 813       assert(src->is_double_xmm(), "not a double");
 814       __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
 815       break;
 816     }
 817 
 818     case T_ARRAY:   // fall through
 819     case T_OBJECT:  // fall through
 820       if (UseCompressedOops && !wide) {
 821         __ movl(as_Address(to_addr), compressed_src);
 822       } else {
 823         __ movptr(as_Address(to_addr), src->as_register());
 824       }
 825       break;
 826     case T_ADDRESS:
 827       __ movptr(as_Address(to_addr), src->as_register());
 828       break;
 829     case T_INT:
 830       __ movl(as_Address(to_addr), src->as_register());
 831       break;
 832 
 833     case T_LONG: {
 834       Register from_lo = src->as_register_lo();
 835       Register from_hi = src->as_register_hi();
 836       __ movptr(as_Address_lo(to_addr), from_lo);
 837       break;
 838     }
 839 
 840     case T_BYTE:    // fall through
 841     case T_BOOLEAN: {
 842       Register src_reg = src->as_register();
 843       Address dst_addr = as_Address(to_addr);
 844       assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
 845       __ movb(dst_addr, src_reg);
 846       break;
 847     }
 848 
 849     case T_CHAR:    // fall through
 850     case T_SHORT:
 851       __ movw(as_Address(to_addr), src->as_register());
 852       break;
 853 
 854     default:
 855       ShouldNotReachHere();
 856   }
 857   if (info != nullptr) {
 858     add_debug_info_for_null_check(null_check_here, info);
 859   }
 860 
 861   if (patch_code != lir_patch_none) {
 862     patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
 863   }
 864 }
 865 
 866 
 867 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
 868   assert(src->is_stack(), "should not call otherwise");
 869   assert(dest->is_register(), "should not call otherwise");
 870 
 871   if (dest->is_single_cpu()) {
 872     if (is_reference_type(type)) {
 873       __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 874       __ verify_oop(dest->as_register());
 875     } else if (type == T_METADATA || type == T_ADDRESS) {
 876       __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 877     } else {
 878       __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 879     }
 880 
 881   } else if (dest->is_double_cpu()) {
 882     Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
 883     Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
 884     __ movptr(dest->as_register_lo(), src_addr_LO);
 885 
 886   } else if (dest->is_single_xmm()) {
 887     Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
 888     __ movflt(dest->as_xmm_float_reg(), src_addr);
 889 
 890   } else if (dest->is_double_xmm()) {
 891     Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
 892     __ movdbl(dest->as_xmm_double_reg(), src_addr);
 893 
 894   } else {
 895     ShouldNotReachHere();
 896   }
 897 }
 898 
 899 
 900 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
 901   if (src->is_single_stack()) {
 902     if (is_reference_type(type)) {
 903       __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
 904       __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
 905     } else {
 906       //no pushl on 64bits
 907       __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
 908       __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
 909     }
 910 
 911   } else if (src->is_double_stack()) {
 912     __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
 913     __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
 914 
 915   } else {
 916     ShouldNotReachHere();
 917   }
 918 }
 919 
 920 
 921 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
 922   assert(src->is_address(), "should not call otherwise");
 923   assert(dest->is_register(), "should not call otherwise");
 924 
 925   LIR_Address* addr = src->as_address_ptr();
 926   Address from_addr = as_Address(addr);
 927 
 928   if (addr->base()->type() == T_OBJECT) {
 929     __ verify_oop(addr->base()->as_pointer_register());
 930   }
 931 
 932   switch (type) {
 933     case T_BOOLEAN: // fall through
 934     case T_BYTE:    // fall through
 935     case T_CHAR:    // fall through
 936     case T_SHORT:
 937       if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
 938         // on pre P6 processors we may get partial register stalls
 939         // so blow away the value of to_rinfo before loading a
 940         // partial word into it.  Do it here so that it precedes
 941         // the potential patch point below.
 942         __ xorptr(dest->as_register(), dest->as_register());
 943       }
 944       break;
 945    default:
 946      break;
 947   }
 948 
 949   PatchingStub* patch = nullptr;
 950   if (patch_code != lir_patch_none) {
 951     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
 952     assert(from_addr.disp() != 0, "must have");
 953   }
 954   if (info != nullptr) {
 955     add_debug_info_for_null_check_here(info);
 956   }
 957 
 958   switch (type) {
 959     case T_FLOAT: {
 960       if (dest->is_single_xmm()) {
 961         __ movflt(dest->as_xmm_float_reg(), from_addr);
 962       } else {
 963         ShouldNotReachHere();
 964       }
 965       break;
 966     }
 967 
 968     case T_DOUBLE: {
 969       if (dest->is_double_xmm()) {
 970         __ movdbl(dest->as_xmm_double_reg(), from_addr);
 971       } else {
 972         ShouldNotReachHere();
 973       }
 974       break;
 975     }
 976 
 977     case T_OBJECT:  // fall through
 978     case T_ARRAY:   // fall through
 979       if (UseCompressedOops && !wide) {
 980         __ movl(dest->as_register(), from_addr);
 981       } else {
 982         __ movptr(dest->as_register(), from_addr);
 983       }
 984       break;
 985 
 986     case T_ADDRESS:
 987       __ movptr(dest->as_register(), from_addr);
 988       break;
 989     case T_INT:
 990       __ movl(dest->as_register(), from_addr);
 991       break;
 992 
 993     case T_LONG: {
 994       Register to_lo = dest->as_register_lo();
 995       Register to_hi = dest->as_register_hi();
 996       __ movptr(to_lo, as_Address_lo(addr));
 997       break;
 998     }
 999 
1000     case T_BOOLEAN: // fall through
1001     case T_BYTE: {
1002       Register dest_reg = dest->as_register();
1003       assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
1004       if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1005         __ movsbl(dest_reg, from_addr);
1006       } else {
1007         __ movb(dest_reg, from_addr);
1008         __ shll(dest_reg, 24);
1009         __ sarl(dest_reg, 24);
1010       }
1011       break;
1012     }
1013 
1014     case T_CHAR: {
1015       Register dest_reg = dest->as_register();
1016       assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
1017       if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1018         __ movzwl(dest_reg, from_addr);
1019       } else {
1020         __ movw(dest_reg, from_addr);
1021       }
1022       break;
1023     }
1024 
1025     case T_SHORT: {
1026       Register dest_reg = dest->as_register();
1027       if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1028         __ movswl(dest_reg, from_addr);
1029       } else {
1030         __ movw(dest_reg, from_addr);
1031         __ shll(dest_reg, 16);
1032         __ sarl(dest_reg, 16);
1033       }
1034       break;
1035     }
1036 
1037     default:
1038       ShouldNotReachHere();
1039   }
1040 
1041   if (patch != nullptr) {
1042     patching_epilog(patch, patch_code, addr->base()->as_register(), info);
1043   }
1044 
1045   if (is_reference_type(type)) {
1046     if (UseCompressedOops && !wide) {
1047       __ decode_heap_oop(dest->as_register());
1048     }
1049 
1050     __ verify_oop(dest->as_register());
1051   }
1052 }
1053 
1054 
1055 NEEDS_CLEANUP; // This could be static?
1056 Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
1057   int elem_size = type2aelembytes(type);
1058   switch (elem_size) {
1059     case 1: return Address::times_1;
1060     case 2: return Address::times_2;
1061     case 4: return Address::times_4;
1062     case 8: return Address::times_8;
1063   }
1064   ShouldNotReachHere();
1065   return Address::no_scale;
1066 }
1067 
1068 
1069 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1070   switch (op->code()) {
1071     case lir_idiv:
1072     case lir_irem:
1073       arithmetic_idiv(op->code(),
1074                       op->in_opr1(),
1075                       op->in_opr2(),
1076                       op->in_opr3(),
1077                       op->result_opr(),
1078                       op->info());
1079       break;
1080     case lir_fmad:
1081       __ fmad(op->result_opr()->as_xmm_double_reg(),
1082               op->in_opr1()->as_xmm_double_reg(),
1083               op->in_opr2()->as_xmm_double_reg(),
1084               op->in_opr3()->as_xmm_double_reg());
1085       break;
1086     case lir_fmaf:
1087       __ fmaf(op->result_opr()->as_xmm_float_reg(),
1088               op->in_opr1()->as_xmm_float_reg(),
1089               op->in_opr2()->as_xmm_float_reg(),
1090               op->in_opr3()->as_xmm_float_reg());
1091       break;
1092     default:      ShouldNotReachHere(); break;
1093   }
1094 }
1095 
1096 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
1097 #ifdef ASSERT
1098   assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
1099   if (op->block() != nullptr)  _branch_target_blocks.append(op->block());
1100   if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
1101 #endif
1102 
1103   if (op->cond() == lir_cond_always) {
1104     if (op->info() != nullptr) add_debug_info_for_branch(op->info());
1105     __ jmp (*(op->label()));
1106   } else {
1107     Assembler::Condition acond = Assembler::zero;
1108     if (op->code() == lir_cond_float_branch) {
1109       assert(op->ublock() != nullptr, "must have unordered successor");
1110       __ jcc(Assembler::parity, *(op->ublock()->label()));
1111       switch(op->cond()) {
1112         case lir_cond_equal:        acond = Assembler::equal;      break;
1113         case lir_cond_notEqual:     acond = Assembler::notEqual;   break;
1114         case lir_cond_less:         acond = Assembler::below;      break;
1115         case lir_cond_lessEqual:    acond = Assembler::belowEqual; break;
1116         case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break;
1117         case lir_cond_greater:      acond = Assembler::above;      break;
1118         default:                         ShouldNotReachHere();
1119       }
1120     } else {
1121       switch (op->cond()) {
1122         case lir_cond_equal:        acond = Assembler::equal;       break;
1123         case lir_cond_notEqual:     acond = Assembler::notEqual;    break;
1124         case lir_cond_less:         acond = Assembler::less;        break;
1125         case lir_cond_lessEqual:    acond = Assembler::lessEqual;   break;
1126         case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
1127         case lir_cond_greater:      acond = Assembler::greater;     break;
1128         case lir_cond_belowEqual:   acond = Assembler::belowEqual;  break;
1129         case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;  break;
1130         default:                         ShouldNotReachHere();
1131       }
1132     }
1133     __ jcc(acond,*(op->label()));
1134   }
1135 }
1136 
1137 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
1138   LIR_Opr src  = op->in_opr();
1139   LIR_Opr dest = op->result_opr();
1140 
1141   switch (op->bytecode()) {
1142     case Bytecodes::_i2l:
1143       __ movl2ptr(dest->as_register_lo(), src->as_register());
1144       break;
1145 
1146     case Bytecodes::_l2i:
1147       __ movl(dest->as_register(), src->as_register_lo());
1148       break;
1149 
1150     case Bytecodes::_i2b:
1151       move_regs(src->as_register(), dest->as_register());
1152       __ sign_extend_byte(dest->as_register());
1153       break;
1154 
1155     case Bytecodes::_i2c:
1156       move_regs(src->as_register(), dest->as_register());
1157       __ andl(dest->as_register(), 0xFFFF);
1158       break;
1159 
1160     case Bytecodes::_i2s:
1161       move_regs(src->as_register(), dest->as_register());
1162       __ sign_extend_short(dest->as_register());
1163       break;
1164 
1165     case Bytecodes::_f2d:
1166       __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
1167       break;
1168 
1169     case Bytecodes::_d2f:
1170       __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
1171       break;
1172 
1173     case Bytecodes::_i2f:
1174       __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
1175       break;
1176 
1177     case Bytecodes::_i2d:
1178       __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
1179       break;
1180 
1181     case Bytecodes::_l2f:
1182       __ cvtsi2ssq(dest->as_xmm_float_reg(), src->as_register_lo());
1183       break;
1184 
1185     case Bytecodes::_l2d:
1186       __ cvtsi2sdq(dest->as_xmm_double_reg(), src->as_register_lo());
1187       break;
1188 
1189     case Bytecodes::_f2i:
1190       __ convert_f2i(dest->as_register(), src->as_xmm_float_reg());
1191       break;
1192 
1193     case Bytecodes::_d2i:
1194       __ convert_d2i(dest->as_register(), src->as_xmm_double_reg());
1195       break;
1196 
1197     case Bytecodes::_f2l:
1198       __ convert_f2l(dest->as_register_lo(), src->as_xmm_float_reg());
1199       break;
1200 
1201     case Bytecodes::_d2l:
1202       __ convert_d2l(dest->as_register_lo(), src->as_xmm_double_reg());
1203       break;
1204 
1205     default: ShouldNotReachHere();
1206   }
1207 }
1208 
1209 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
1210   if (op->init_check()) {
1211     add_debug_info_for_null_check_here(op->stub()->info());
1212     // init_state needs acquire, but x86 is TSO, and so we are already good.
1213     __ cmpb(Address(op->klass()->as_register(),
1214                     InstanceKlass::init_state_offset()),
1215                     InstanceKlass::fully_initialized);
1216     __ jcc(Assembler::notEqual, *op->stub()->entry());
1217   }
1218   __ allocate_object(op->obj()->as_register(),
1219                      op->tmp1()->as_register(),
1220                      op->tmp2()->as_register(),
1221                      op->header_size(),
1222                      op->object_size(),
1223                      op->klass()->as_register(),
1224                      *op->stub()->entry());
1225   __ bind(*op->stub()->continuation());
1226 }
1227 
1228 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1229   Register len =  op->len()->as_register();
1230   __ movslq(len, len);
1231 
1232   if (UseSlowPath ||
1233       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1234       (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
1235     __ jmp(*op->stub()->entry());
1236   } else {
1237     Register tmp1 = op->tmp1()->as_register();
1238     Register tmp2 = op->tmp2()->as_register();
1239     Register tmp3 = op->tmp3()->as_register();
1240     if (len == tmp1) {
1241       tmp1 = tmp3;
1242     } else if (len == tmp2) {
1243       tmp2 = tmp3;
1244     } else if (len == tmp3) {
1245       // everything is ok
1246     } else {
1247       __ mov(tmp3, len);
1248     }
1249     __ allocate_array(op->obj()->as_register(),
1250                       len,
1251                       tmp1,
1252                       tmp2,
1253                       arrayOopDesc::base_offset_in_bytes(op->type()),
1254                       array_element_size(op->type()),
1255                       op->klass()->as_register(),
1256                       *op->stub()->entry(),
1257                       op->zero_array());
1258   }
1259   __ bind(*op->stub()->continuation());
1260 }
1261 
1262 void LIR_Assembler::type_profile_helper(Register mdo,
1263                                         ciMethodData *md, ciProfileData *data,
1264                                         Register recv) {
1265   int mdp_offset = md->byte_offset_of_slot(data, in_ByteSize(0));
1266   __ profile_receiver_type(recv, mdo, mdp_offset);
1267 }
1268 
1269 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
1270   // we always need a stub for the failure case.
1271   CodeStub* stub = op->stub();
1272   Register obj = op->object()->as_register();
1273   Register k_RInfo = op->tmp1()->as_register();
1274   Register klass_RInfo = op->tmp2()->as_register();
1275   Register dst = op->result_opr()->as_register();
1276   ciKlass* k = op->klass();
1277   Register Rtmp1 = noreg;
1278   Register tmp_load_klass = rscratch1;
1279 
1280   // check if it needs to be profiled
1281   ciMethodData* md = nullptr;
1282   ciProfileData* data = nullptr;
1283 
1284   if (op->should_profile()) {
1285     ciMethod* method = op->profiled_method();
1286     assert(method != nullptr, "Should have method");
1287     int bci = op->profiled_bci();
1288     md = method->method_data_or_null();
1289     assert(md != nullptr, "Sanity");
1290     data = md->bci_to_data(bci);
1291     assert(data != nullptr,                "need data for type check");
1292     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1293   }
1294   Label* success_target = success;
1295   Label* failure_target = failure;
1296 
1297   if (obj == k_RInfo) {
1298     k_RInfo = dst;
1299   } else if (obj == klass_RInfo) {
1300     klass_RInfo = dst;
1301   }
1302   if (k->is_loaded() && !UseCompressedClassPointers) {
1303     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1304   } else {
1305     Rtmp1 = op->tmp3()->as_register();
1306     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1307   }
1308 
1309   assert_different_registers(obj, k_RInfo, klass_RInfo);
1310 
1311   __ testptr(obj, obj);
1312   if (op->should_profile()) {
1313     Label not_null;
1314     Register mdo  = klass_RInfo;
1315     __ mov_metadata(mdo, md->constant_encoding());
1316     __ jccb(Assembler::notEqual, not_null);
1317     // Object is null; update MDO and exit
1318     Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
1319     int header_bits = BitData::null_seen_byte_constant();
1320     __ orb(data_addr, header_bits);
1321     __ jmp(*obj_is_null);
1322     __ bind(not_null);
1323 
1324     Register recv = k_RInfo;
1325     __ load_klass(recv, obj, tmp_load_klass);
1326     type_profile_helper(mdo, md, data, recv);
1327   } else {
1328     __ jcc(Assembler::equal, *obj_is_null);
1329   }
1330 
1331   if (!k->is_loaded()) {
1332     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1333   } else {
1334     __ mov_metadata(k_RInfo, k->constant_encoding());
1335   }
1336   __ verify_oop(obj);
1337 
1338   if (op->fast_check()) {
1339     // get object class
1340     // not a safepoint as obj null check happens earlier
1341     if (UseCompressedClassPointers) {
1342       __ load_klass(Rtmp1, obj, tmp_load_klass);
1343       __ cmpptr(k_RInfo, Rtmp1);
1344     } else {
1345       __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1346     }
1347     __ jcc(Assembler::notEqual, *failure_target);
1348     // successful cast, fall through to profile or jump
1349   } else {
1350     // get object class
1351     // not a safepoint as obj null check happens earlier
1352     __ load_klass(klass_RInfo, obj, tmp_load_klass);
1353     if (k->is_loaded()) {
1354       // See if we get an immediate positive hit
1355       __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
1356       if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1357         __ jcc(Assembler::notEqual, *failure_target);
1358         // successful cast, fall through to profile or jump
1359       } else {
1360         // See if we get an immediate positive hit
1361         __ jcc(Assembler::equal, *success_target);
1362         // check for self
1363         __ cmpptr(klass_RInfo, k_RInfo);
1364         __ jcc(Assembler::equal, *success_target);
1365 
1366         __ push_ppx(klass_RInfo);
1367         __ push_ppx(k_RInfo);
1368         __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1369         __ pop_ppx(klass_RInfo);
1370         __ pop_ppx(klass_RInfo);
1371         // result is a boolean
1372         __ testl(klass_RInfo, klass_RInfo);
1373         __ jcc(Assembler::equal, *failure_target);
1374         // successful cast, fall through to profile or jump
1375       }
1376     } else {
1377       // perform the fast part of the checking logic
1378       __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
1379       // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1380       __ push_ppx(klass_RInfo);
1381       __ push_ppx(k_RInfo);
1382       __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1383       __ pop_ppx(klass_RInfo);
1384       __ pop_ppx(k_RInfo);
1385       // result is a boolean
1386       __ testl(k_RInfo, k_RInfo);
1387       __ jcc(Assembler::equal, *failure_target);
1388       // successful cast, fall through to profile or jump
1389     }
1390   }
1391   __ jmp(*success);
1392 }
1393 
1394 
1395 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
1396   Register tmp_load_klass = rscratch1;
1397   LIR_Code code = op->code();
1398   if (code == lir_store_check) {
1399     Register value = op->object()->as_register();
1400     Register array = op->array()->as_register();
1401     Register k_RInfo = op->tmp1()->as_register();
1402     Register klass_RInfo = op->tmp2()->as_register();
1403     Register Rtmp1 = op->tmp3()->as_register();
1404 
1405     CodeStub* stub = op->stub();
1406 
1407     // check if it needs to be profiled
1408     ciMethodData* md = nullptr;
1409     ciProfileData* data = nullptr;
1410 
1411     if (op->should_profile()) {
1412       ciMethod* method = op->profiled_method();
1413       assert(method != nullptr, "Should have method");
1414       int bci = op->profiled_bci();
1415       md = method->method_data_or_null();
1416       assert(md != nullptr, "Sanity");
1417       data = md->bci_to_data(bci);
1418       assert(data != nullptr,                "need data for type check");
1419       assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1420     }
1421     Label done;
1422     Label* success_target = &done;
1423     Label* failure_target = stub->entry();
1424 
1425     __ testptr(value, value);
1426     if (op->should_profile()) {
1427       Label not_null;
1428       Register mdo  = klass_RInfo;
1429       __ mov_metadata(mdo, md->constant_encoding());
1430       __ jccb(Assembler::notEqual, not_null);
1431       // Object is null; update MDO and exit
1432       Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
1433       int header_bits = BitData::null_seen_byte_constant();
1434       __ orb(data_addr, header_bits);
1435       __ jmp(done);
1436       __ bind(not_null);
1437 
1438       Register recv = k_RInfo;
1439       __ load_klass(recv, value, tmp_load_klass);
1440       type_profile_helper(mdo, md, data, recv);
1441     } else {
1442       __ jcc(Assembler::equal, done);
1443     }
1444 
1445     add_debug_info_for_null_check_here(op->info_for_exception());
1446     __ load_klass(k_RInfo, array, tmp_load_klass);
1447     __ load_klass(klass_RInfo, value, tmp_load_klass);
1448 
1449     // get instance klass (it's already uncompressed)
1450     __ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
1451     // perform the fast part of the checking logic
1452     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
1453     // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1454     __ push_ppx(klass_RInfo);
1455     __ push_ppx(k_RInfo);
1456     __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
1457     __ pop_ppx(klass_RInfo);
1458     __ pop_ppx(k_RInfo);
1459     // result is a boolean
1460     __ testl(k_RInfo, k_RInfo);
1461     __ jcc(Assembler::equal, *failure_target);
1462     // fall through to the success case
1463 
1464     __ bind(done);
1465   } else
1466     if (code == lir_checkcast) {
1467       Register obj = op->object()->as_register();
1468       Register dst = op->result_opr()->as_register();
1469       Label success;
1470       emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
1471       __ bind(success);
1472       if (dst != obj) {
1473         __ mov(dst, obj);
1474       }
1475     } else
1476       if (code == lir_instanceof) {
1477         Register obj = op->object()->as_register();
1478         Register dst = op->result_opr()->as_register();
1479         Label success, failure, done;
1480         emit_typecheck_helper(op, &success, &failure, &failure);
1481         __ bind(failure);
1482         __ xorptr(dst, dst);
1483         __ jmpb(done);
1484         __ bind(success);
1485         __ movptr(dst, 1);
1486         __ bind(done);
1487       } else {
1488         ShouldNotReachHere();
1489       }
1490 
1491 }
1492 
1493 
1494 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1495   if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
1496     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1497     Register newval = op->new_value()->as_register();
1498     Register cmpval = op->cmp_value()->as_register();
1499     assert(cmpval == rax, "wrong register");
1500     assert(newval != noreg, "new val must be register");
1501     assert(cmpval != newval, "cmp and new values must be in different registers");
1502     assert(cmpval != addr, "cmp and addr must be in different registers");
1503     assert(newval != addr, "new value and addr must be in different registers");
1504 
1505     if (op->code() == lir_cas_obj) {
1506       if (UseCompressedOops) {
1507         __ encode_heap_oop(cmpval);
1508         __ mov(rscratch1, newval);
1509         __ encode_heap_oop(rscratch1);
1510         __ lock();
1511         // cmpval (rax) is implicitly used by this instruction
1512         __ cmpxchgl(rscratch1, Address(addr, 0));
1513       } else {
1514         __ lock();
1515         __ cmpxchgptr(newval, Address(addr, 0));
1516       }
1517     } else {
1518       assert(op->code() == lir_cas_int, "lir_cas_int expected");
1519       __ lock();
1520       __ cmpxchgl(newval, Address(addr, 0));
1521     }
1522   } else if (op->code() == lir_cas_long) {
1523     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1524     Register newval = op->new_value()->as_register_lo();
1525     Register cmpval = op->cmp_value()->as_register_lo();
1526     assert(cmpval == rax, "wrong register");
1527     assert(newval != noreg, "new val must be register");
1528     assert(cmpval != newval, "cmp and new values must be in different registers");
1529     assert(cmpval != addr, "cmp and addr must be in different registers");
1530     assert(newval != addr, "new value and addr must be in different registers");
1531     __ lock();
1532     __ cmpxchgq(newval, Address(addr, 0));
1533   } else {
1534     Unimplemented();
1535   }
1536 }
1537 
1538 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
1539                           LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
1540   assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on x86");
1541 
1542   Assembler::Condition acond, ncond;
1543   switch (condition) {
1544     case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
1545     case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
1546     case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
1547     case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
1548     case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
1549     case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
1550     case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
1551     case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
1552     default:                    acond = Assembler::equal;        ncond = Assembler::notEqual;
1553                                 ShouldNotReachHere();
1554   }
1555 
1556   if (opr1->is_cpu_register()) {
1557     reg2reg(opr1, result);
1558   } else if (opr1->is_stack()) {
1559     stack2reg(opr1, result, result->type());
1560   } else if (opr1->is_constant()) {
1561     const2reg(opr1, result, lir_patch_none, nullptr);
1562   } else {
1563     ShouldNotReachHere();
1564   }
1565 
1566   if (VM_Version::supports_cmov() && !opr2->is_constant()) {
1567     // optimized version that does not require a branch
1568     if (opr2->is_single_cpu()) {
1569       assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");
1570       __ cmov(ncond, result->as_register(), opr2->as_register());
1571     } else if (opr2->is_double_cpu()) {
1572       assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
1573       assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
1574       __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo());
1575     } else if (opr2->is_single_stack()) {
1576       __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()));
1577     } else if (opr2->is_double_stack()) {
1578       __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes));
1579     } else {
1580       ShouldNotReachHere();
1581     }
1582 
1583   } else {
1584     Label skip;
1585     __ jccb(acond, skip);
1586     if (opr2->is_cpu_register()) {
1587       reg2reg(opr2, result);
1588     } else if (opr2->is_stack()) {
1589       stack2reg(opr2, result, result->type());
1590     } else if (opr2->is_constant()) {
1591       const2reg(opr2, result, lir_patch_none, nullptr);
1592     } else {
1593       ShouldNotReachHere();
1594     }
1595     __ bind(skip);
1596   }
1597 }
1598 
1599 
1600 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info) {
1601   assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
1602 
1603   if (left->is_single_cpu()) {
1604     assert(left == dest, "left and dest must be equal");
1605     Register lreg = left->as_register();
1606 
1607     if (right->is_single_cpu()) {
1608       // cpu register - cpu register
1609       Register rreg = right->as_register();
1610       switch (code) {
1611         case lir_add: __ addl (lreg, rreg); break;
1612         case lir_sub: __ subl (lreg, rreg); break;
1613         case lir_mul: __ imull(lreg, rreg); break;
1614         default:      ShouldNotReachHere();
1615       }
1616 
1617     } else if (right->is_stack()) {
1618       // cpu register - stack
1619       Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
1620       switch (code) {
1621         case lir_add: __ addl(lreg, raddr); break;
1622         case lir_sub: __ subl(lreg, raddr); break;
1623         default:      ShouldNotReachHere();
1624       }
1625 
1626     } else if (right->is_constant()) {
1627       // cpu register - constant
1628       jint c = right->as_constant_ptr()->as_jint();
1629       switch (code) {
1630         case lir_add: {
1631           __ incrementl(lreg, c);
1632           break;
1633         }
1634         case lir_sub: {
1635           __ decrementl(lreg, c);
1636           break;
1637         }
1638         default: ShouldNotReachHere();
1639       }
1640 
1641     } else {
1642       ShouldNotReachHere();
1643     }
1644 
1645   } else if (left->is_double_cpu()) {
1646     assert(left == dest, "left and dest must be equal");
1647     Register lreg_lo = left->as_register_lo();
1648     Register lreg_hi = left->as_register_hi();
1649 
1650     if (right->is_double_cpu()) {
1651       // cpu register - cpu register
1652       Register rreg_lo = right->as_register_lo();
1653       Register rreg_hi = right->as_register_hi();
1654       assert_different_registers(lreg_lo, rreg_lo);
1655       switch (code) {
1656         case lir_add:
1657           __ addptr(lreg_lo, rreg_lo);
1658           break;
1659         case lir_sub:
1660           __ subptr(lreg_lo, rreg_lo);
1661           break;
1662         case lir_mul:
1663           __ imulq(lreg_lo, rreg_lo);
1664           break;
1665         default:
1666           ShouldNotReachHere();
1667       }
1668 
1669     } else if (right->is_constant()) {
1670       // cpu register - constant
1671       jlong c = right->as_constant_ptr()->as_jlong_bits();
1672       __ movptr(r10, (intptr_t) c);
1673       switch (code) {
1674         case lir_add:
1675           __ addptr(lreg_lo, r10);
1676           break;
1677         case lir_sub:
1678           __ subptr(lreg_lo, r10);
1679           break;
1680         default:
1681           ShouldNotReachHere();
1682       }
1683 
1684     } else {
1685       ShouldNotReachHere();
1686     }
1687 
1688   } else if (left->is_single_xmm()) {
1689     assert(left == dest, "left and dest must be equal");
1690     XMMRegister lreg = left->as_xmm_float_reg();
1691 
1692     if (right->is_single_xmm()) {
1693       XMMRegister rreg = right->as_xmm_float_reg();
1694       switch (code) {
1695         case lir_add: __ addss(lreg, rreg);  break;
1696         case lir_sub: __ subss(lreg, rreg);  break;
1697         case lir_mul: __ mulss(lreg, rreg);  break;
1698         case lir_div: __ divss(lreg, rreg);  break;
1699         default: ShouldNotReachHere();
1700       }
1701     } else {
1702       Address raddr;
1703       if (right->is_single_stack()) {
1704         raddr = frame_map()->address_for_slot(right->single_stack_ix());
1705       } else if (right->is_constant()) {
1706         // hack for now
1707         raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat())));
1708       } else {
1709         ShouldNotReachHere();
1710       }
1711       switch (code) {
1712         case lir_add: __ addss(lreg, raddr);  break;
1713         case lir_sub: __ subss(lreg, raddr);  break;
1714         case lir_mul: __ mulss(lreg, raddr);  break;
1715         case lir_div: __ divss(lreg, raddr);  break;
1716         default: ShouldNotReachHere();
1717       }
1718     }
1719 
1720   } else if (left->is_double_xmm()) {
1721     assert(left == dest, "left and dest must be equal");
1722 
1723     XMMRegister lreg = left->as_xmm_double_reg();
1724     if (right->is_double_xmm()) {
1725       XMMRegister rreg = right->as_xmm_double_reg();
1726       switch (code) {
1727         case lir_add: __ addsd(lreg, rreg);  break;
1728         case lir_sub: __ subsd(lreg, rreg);  break;
1729         case lir_mul: __ mulsd(lreg, rreg);  break;
1730         case lir_div: __ divsd(lreg, rreg);  break;
1731         default: ShouldNotReachHere();
1732       }
1733     } else {
1734       Address raddr;
1735       if (right->is_double_stack()) {
1736         raddr = frame_map()->address_for_slot(right->double_stack_ix());
1737       } else if (right->is_constant()) {
1738         // hack for now
1739         raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
1740       } else {
1741         ShouldNotReachHere();
1742       }
1743       switch (code) {
1744         case lir_add: __ addsd(lreg, raddr);  break;
1745         case lir_sub: __ subsd(lreg, raddr);  break;
1746         case lir_mul: __ mulsd(lreg, raddr);  break;
1747         case lir_div: __ divsd(lreg, raddr);  break;
1748         default: ShouldNotReachHere();
1749       }
1750     }
1751 
1752   } else if (left->is_single_stack() || left->is_address()) {
1753     assert(left == dest, "left and dest must be equal");
1754 
1755     Address laddr;
1756     if (left->is_single_stack()) {
1757       laddr = frame_map()->address_for_slot(left->single_stack_ix());
1758     } else if (left->is_address()) {
1759       laddr = as_Address(left->as_address_ptr());
1760     } else {
1761       ShouldNotReachHere();
1762     }
1763 
1764     if (right->is_single_cpu()) {
1765       Register rreg = right->as_register();
1766       switch (code) {
1767         case lir_add: __ addl(laddr, rreg); break;
1768         case lir_sub: __ subl(laddr, rreg); break;
1769         default:      ShouldNotReachHere();
1770       }
1771     } else if (right->is_constant()) {
1772       jint c = right->as_constant_ptr()->as_jint();
1773       switch (code) {
1774         case lir_add: {
1775           __ incrementl(laddr, c);
1776           break;
1777         }
1778         case lir_sub: {
1779           __ decrementl(laddr, c);
1780           break;
1781         }
1782         default: ShouldNotReachHere();
1783       }
1784     } else {
1785       ShouldNotReachHere();
1786     }
1787 
1788   } else {
1789     ShouldNotReachHere();
1790   }
1791 }
1792 
1793 
1794 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
1795   if (value->is_double_xmm()) {
1796     switch(code) {
1797       case lir_abs :
1798         {
1799           if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
1800             __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
1801           }
1802           assert(!tmp->is_valid(), "do not need temporary");
1803           __ andpd(dest->as_xmm_double_reg(),
1804                    ExternalAddress((address)double_signmask_pool),
1805                    rscratch1);
1806         }
1807         break;
1808 
1809       case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
1810       // all other intrinsics are not available in the SSE instruction set, so FPU is used
1811       default      : ShouldNotReachHere();
1812     }
1813 
1814   } else if (code == lir_f2hf) {
1815     __ flt_to_flt16(dest->as_register(), value->as_xmm_float_reg(), tmp->as_xmm_float_reg());
1816   } else if (code == lir_hf2f) {
1817     __ flt16_to_flt(dest->as_xmm_float_reg(), value->as_register());
1818   } else {
1819     Unimplemented();
1820   }
1821 }
1822 
1823 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1824   // assert(left->destroys_register(), "check");
1825   if (left->is_single_cpu()) {
1826     Register reg = left->as_register();
1827     if (right->is_constant()) {
1828       int val = right->as_constant_ptr()->as_jint();
1829       switch (code) {
1830         case lir_logic_and: __ andl (reg, val); break;
1831         case lir_logic_or:  __ orl  (reg, val); break;
1832         case lir_logic_xor: __ xorl (reg, val); break;
1833         default: ShouldNotReachHere();
1834       }
1835     } else if (right->is_stack()) {
1836       // added support for stack operands
1837       Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
1838       switch (code) {
1839         case lir_logic_and: __ andl (reg, raddr); break;
1840         case lir_logic_or:  __ orl  (reg, raddr); break;
1841         case lir_logic_xor: __ xorl (reg, raddr); break;
1842         default: ShouldNotReachHere();
1843       }
1844     } else {
1845       Register rright = right->as_register();
1846       switch (code) {
1847         case lir_logic_and: __ andptr (reg, rright); break;
1848         case lir_logic_or : __ orptr  (reg, rright); break;
1849         case lir_logic_xor: __ xorptr (reg, rright); break;
1850         default: ShouldNotReachHere();
1851       }
1852     }
1853     move_regs(reg, dst->as_register());
1854   } else {
1855     Register l_lo = left->as_register_lo();
1856     Register l_hi = left->as_register_hi();
1857     if (right->is_constant()) {
1858       __ mov64(rscratch1, right->as_constant_ptr()->as_jlong());
1859       switch (code) {
1860         case lir_logic_and:
1861           __ andq(l_lo, rscratch1);
1862           break;
1863         case lir_logic_or:
1864           __ orq(l_lo, rscratch1);
1865           break;
1866         case lir_logic_xor:
1867           __ xorq(l_lo, rscratch1);
1868           break;
1869         default: ShouldNotReachHere();
1870       }
1871     } else {
1872       Register r_lo;
1873       if (is_reference_type(right->type())) {
1874         r_lo = right->as_register();
1875       } else {
1876         r_lo = right->as_register_lo();
1877       }
1878       switch (code) {
1879         case lir_logic_and:
1880           __ andptr(l_lo, r_lo);
1881           break;
1882         case lir_logic_or:
1883           __ orptr(l_lo, r_lo);
1884           break;
1885         case lir_logic_xor:
1886           __ xorptr(l_lo, r_lo);
1887           break;
1888         default: ShouldNotReachHere();
1889       }
1890     }
1891 
1892     Register dst_lo = dst->as_register_lo();
1893     Register dst_hi = dst->as_register_hi();
1894 
1895     move_regs(l_lo, dst_lo);
1896   }
1897 }
1898 
1899 
1900 // we assume that rax, and rdx can be overwritten
1901 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
1902 
1903   assert(left->is_single_cpu(),   "left must be register");
1904   assert(right->is_single_cpu() || right->is_constant(),  "right must be register or constant");
1905   assert(result->is_single_cpu(), "result must be register");
1906 
1907   //  assert(left->destroys_register(), "check");
1908   //  assert(right->destroys_register(), "check");
1909 
1910   Register lreg = left->as_register();
1911   Register dreg = result->as_register();
1912 
1913   if (right->is_constant()) {
1914     jint divisor = right->as_constant_ptr()->as_jint();
1915     assert(divisor > 0 && is_power_of_2(divisor), "must be");
1916     if (code == lir_idiv) {
1917       assert(lreg == rax, "must be rax,");
1918       assert(temp->as_register() == rdx, "tmp register must be rdx");
1919       __ cdql(); // sign extend into rdx:rax
1920       if (divisor == 2) {
1921         __ subl(lreg, rdx);
1922       } else {
1923         __ andl(rdx, divisor - 1);
1924         __ addl(lreg, rdx);
1925       }
1926       __ sarl(lreg, log2i_exact(divisor));
1927       move_regs(lreg, dreg);
1928     } else if (code == lir_irem) {
1929       Label done;
1930       __ mov(dreg, lreg);
1931       __ andl(dreg, 0x80000000 | (divisor - 1));
1932       __ jcc(Assembler::positive, done);
1933       __ decrement(dreg);
1934       __ orl(dreg, ~(divisor - 1));
1935       __ increment(dreg);
1936       __ bind(done);
1937     } else {
1938       ShouldNotReachHere();
1939     }
1940   } else {
1941     Register rreg = right->as_register();
1942     assert(lreg == rax, "left register must be rax,");
1943     assert(rreg != rdx, "right register must not be rdx");
1944     assert(temp->as_register() == rdx, "tmp register must be rdx");
1945 
1946     move_regs(lreg, rax);
1947 
1948     int idivl_offset = __ corrected_idivl(rreg);
1949     if (ImplicitDiv0Checks) {
1950       add_debug_info_for_div0(idivl_offset, info);
1951     }
1952     if (code == lir_irem) {
1953       move_regs(rdx, dreg); // result is in rdx
1954     } else {
1955       move_regs(rax, dreg);
1956     }
1957   }
1958 }
1959 
1960 
1961 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1962   if (opr1->is_single_cpu()) {
1963     Register reg1 = opr1->as_register();
1964     if (opr2->is_single_cpu()) {
1965       // cpu register - cpu register
1966       if (is_reference_type(opr1->type())) {
1967         __ cmpoop(reg1, opr2->as_register());
1968       } else {
1969         assert(!is_reference_type(opr2->type()), "cmp int, oop?");
1970         __ cmpl(reg1, opr2->as_register());
1971       }
1972     } else if (opr2->is_stack()) {
1973       // cpu register - stack
1974       if (is_reference_type(opr1->type())) {
1975         __ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
1976       } else {
1977         __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
1978       }
1979     } else if (opr2->is_constant()) {
1980       // cpu register - constant
1981       LIR_Const* c = opr2->as_constant_ptr();
1982       if (c->type() == T_INT) {
1983         jint i = c->as_jint();
1984         if (i == 0) {
1985           __ testl(reg1, reg1);
1986         } else {
1987           __ cmpl(reg1, i);
1988         }
1989       } else if (c->type() == T_METADATA) {
1990         // All we need for now is a comparison with null for equality.
1991         assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
1992         Metadata* m = c->as_metadata();
1993         if (m == nullptr) {
1994           __ testptr(reg1, reg1);
1995         } else {
1996           ShouldNotReachHere();
1997         }
1998       } else if (is_reference_type(c->type())) {
1999         // In 64bit oops are single register
2000         jobject o = c->as_jobject();
2001         if (o == nullptr) {
2002           __ testptr(reg1, reg1);
2003         } else {
2004           __ cmpoop(reg1, o, rscratch1);
2005         }
2006       } else {
2007         fatal("unexpected type: %s", basictype_to_str(c->type()));
2008       }
2009       // cpu register - address
2010     } else if (opr2->is_address()) {
2011       if (op->info() != nullptr) {
2012         add_debug_info_for_null_check_here(op->info());
2013       }
2014       __ cmpl(reg1, as_Address(opr2->as_address_ptr()));
2015     } else {
2016       ShouldNotReachHere();
2017     }
2018 
2019   } else if(opr1->is_double_cpu()) {
2020     Register xlo = opr1->as_register_lo();
2021     Register xhi = opr1->as_register_hi();
2022     if (opr2->is_double_cpu()) {
2023       __ cmpptr(xlo, opr2->as_register_lo());
2024     } else if (opr2->is_constant()) {
2025       // cpu register - constant 0
2026       assert(opr2->as_jlong() == (jlong)0, "only handles zero");
2027       __ cmpptr(xlo, (int32_t)opr2->as_jlong());
2028     } else {
2029       ShouldNotReachHere();
2030     }
2031 
2032   } else if (opr1->is_single_xmm()) {
2033     XMMRegister reg1 = opr1->as_xmm_float_reg();
2034     if (opr2->is_single_xmm()) {
2035       // xmm register - xmm register
2036       __ ucomiss(reg1, opr2->as_xmm_float_reg());
2037     } else if (opr2->is_stack()) {
2038       // xmm register - stack
2039       __ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2040     } else if (opr2->is_constant()) {
2041       // xmm register - constant
2042       __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat())));
2043     } else if (opr2->is_address()) {
2044       // xmm register - address
2045       if (op->info() != nullptr) {
2046         add_debug_info_for_null_check_here(op->info());
2047       }
2048       __ ucomiss(reg1, as_Address(opr2->as_address_ptr()));
2049     } else {
2050       ShouldNotReachHere();
2051     }
2052 
2053   } else if (opr1->is_double_xmm()) {
2054     XMMRegister reg1 = opr1->as_xmm_double_reg();
2055     if (opr2->is_double_xmm()) {
2056       // xmm register - xmm register
2057       __ ucomisd(reg1, opr2->as_xmm_double_reg());
2058     } else if (opr2->is_stack()) {
2059       // xmm register - stack
2060       __ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix()));
2061     } else if (opr2->is_constant()) {
2062       // xmm register - constant
2063       __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
2064     } else if (opr2->is_address()) {
2065       // xmm register - address
2066       if (op->info() != nullptr) {
2067         add_debug_info_for_null_check_here(op->info());
2068       }
2069       __ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
2070     } else {
2071       ShouldNotReachHere();
2072     }
2073 
2074   } else if (opr1->is_address() && opr2->is_constant()) {
2075     LIR_Const* c = opr2->as_constant_ptr();
2076     if (is_reference_type(c->type())) {
2077       assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
2078       __ movoop(rscratch1, c->as_jobject());
2079     }
2080     if (op->info() != nullptr) {
2081       add_debug_info_for_null_check_here(op->info());
2082     }
2083     // special case: address - constant
2084     LIR_Address* addr = opr1->as_address_ptr();
2085     if (c->type() == T_INT) {
2086       __ cmpl(as_Address(addr), c->as_jint());
2087     } else if (is_reference_type(c->type())) {
2088       // %%% Make this explode if addr isn't reachable until we figure out a
2089       // better strategy by giving noreg as the temp for as_Address
2090       __ cmpoop(rscratch1, as_Address(addr, noreg));
2091     } else {
2092       ShouldNotReachHere();
2093     }
2094 
2095   } else {
2096     ShouldNotReachHere();
2097   }
2098 }
2099 
2100 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
2101   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2102     if (left->is_single_xmm()) {
2103       assert(right->is_single_xmm(), "must match");
2104       __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i);
2105     } else if (left->is_double_xmm()) {
2106       assert(right->is_double_xmm(), "must match");
2107       __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i);
2108 
2109     } else {
2110       ShouldNotReachHere();
2111     }
2112   } else {
2113     assert(code == lir_cmp_l2i, "check");
2114     Label done;
2115     Register dest = dst->as_register();
2116     __ cmpptr(left->as_register_lo(), right->as_register_lo());
2117     __ movl(dest, -1);
2118     __ jccb(Assembler::less, done);
2119     __ setb(Assembler::notZero, dest);
2120     __ movzbl(dest, dest);
2121     __ bind(done);
2122   }
2123 }
2124 
2125 
2126 void LIR_Assembler::align_call(LIR_Code code) {
2127   // make sure that the displacement word of the call ends up word aligned
2128   int offset = __ offset();
2129   switch (code) {
2130   case lir_static_call:
2131   case lir_optvirtual_call:
2132   case lir_dynamic_call:
2133     offset += NativeCall::displacement_offset;
2134     break;
2135   case lir_icvirtual_call:
2136     offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size_rex;
2137     break;
2138   default: ShouldNotReachHere();
2139   }
2140   __ align(BytesPerWord, offset);
2141 }
2142 
2143 
2144 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2145   assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2146          "must be aligned");
2147   __ call(AddressLiteral(op->addr(), rtype));
2148   add_call_info(code_offset(), op->info());
2149   __ post_call_nop();
2150 }
2151 
2152 
2153 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2154   __ ic_call(op->addr());
2155   add_call_info(code_offset(), op->info());
2156   assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
2157          "must be aligned");
2158   __ post_call_nop();
2159 }
2160 
2161 
2162 void LIR_Assembler::emit_static_call_stub() {
2163   address call_pc = __ pc();
2164   address stub = __ start_a_stub(call_stub_size());
2165   if (stub == nullptr) {
2166     bailout("static call stub overflow");
2167     return;
2168   }
2169 
2170   int start = __ offset();
2171 
2172   // make sure that the displacement word of the call ends up word aligned
2173   __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size_rex + NativeCall::displacement_offset);
2174   __ relocate(static_stub_Relocation::spec(call_pc));
2175   __ mov_metadata(rbx, (Metadata*)nullptr);
2176   // must be set to -1 at code generation time
2177   assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned");
2178   // On 64bit this will die since it will take a movq & jmp, must be only a jmp
2179   __ jump(RuntimeAddress(__ pc()));
2180 
2181   assert(__ offset() - start <= call_stub_size(), "stub too big");
2182   __ end_a_stub();
2183 }
2184 
2185 
2186 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2187   assert(exceptionOop->as_register() == rax, "must match");
2188   assert(exceptionPC->as_register() == rdx, "must match");
2189 
2190   // exception object is not added to oop map by LinearScan
2191   // (LinearScan assumes that no oops are in fixed registers)
2192   info->add_register_oop(exceptionOop);
2193   StubId unwind_id;
2194 
2195   // get current pc information
2196   // pc is only needed if the method has an exception handler, the unwind code does not need it.
2197   int pc_for_athrow_offset = __ offset();
2198   InternalAddress pc_for_athrow(__ pc());
2199   __ lea(exceptionPC->as_register(), pc_for_athrow);
2200   add_call_info(pc_for_athrow_offset, info); // for exception handler
2201 
2202   __ verify_not_null_oop(rax);
2203   // search an exception handler (rax: exception oop, rdx: throwing pc)
2204   if (compilation()->has_fpu_code()) {
2205     unwind_id = StubId::c1_handle_exception_id;
2206   } else {
2207     unwind_id = StubId::c1_handle_exception_nofpu_id;
2208   }
2209   __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
2210 
2211   // enough room for two byte trap
2212   __ nop();
2213 }
2214 
2215 
2216 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
2217   assert(exceptionOop->as_register() == rax, "must match");
2218 
2219   __ jmp(_unwind_handler_entry);
2220 }
2221 
2222 
2223 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2224 
2225   // optimized version for linear scan:
2226   // * count must be already in ECX (guaranteed by LinearScan)
2227   // * left and dest must be equal
2228   // * tmp must be unused
2229   assert(count->as_register() == SHIFT_count, "count must be in ECX");
2230   assert(left == dest, "left and dest must be equal");
2231   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2232 
2233   if (left->is_single_cpu()) {
2234     Register value = left->as_register();
2235     assert(value != SHIFT_count, "left cannot be ECX");
2236 
2237     switch (code) {
2238       case lir_shl:  __ shll(value); break;
2239       case lir_shr:  __ sarl(value); break;
2240       case lir_ushr: __ shrl(value); break;
2241       default: ShouldNotReachHere();
2242     }
2243   } else if (left->is_double_cpu()) {
2244     Register lo = left->as_register_lo();
2245     Register hi = left->as_register_hi();
2246     assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX");
2247     switch (code) {
2248       case lir_shl:  __ shlptr(lo);        break;
2249       case lir_shr:  __ sarptr(lo);        break;
2250       case lir_ushr: __ shrptr(lo);        break;
2251       default: ShouldNotReachHere();
2252     }
2253   } else {
2254     ShouldNotReachHere();
2255   }
2256 }
2257 
2258 
2259 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2260   if (dest->is_single_cpu()) {
2261     // first move left into dest so that left is not destroyed by the shift
2262     Register value = dest->as_register();
2263     count = count & 0x1F; // Java spec
2264 
2265     move_regs(left->as_register(), value);
2266     switch (code) {
2267       case lir_shl:  __ shll(value, count); break;
2268       case lir_shr:  __ sarl(value, count); break;
2269       case lir_ushr: __ shrl(value, count); break;
2270       default: ShouldNotReachHere();
2271     }
2272   } else if (dest->is_double_cpu()) {
2273     // first move left into dest so that left is not destroyed by the shift
2274     Register value = dest->as_register_lo();
2275     count = count & 0x1F; // Java spec
2276 
2277     move_regs(left->as_register_lo(), value);
2278     switch (code) {
2279       case lir_shl:  __ shlptr(value, count); break;
2280       case lir_shr:  __ sarptr(value, count); break;
2281       case lir_ushr: __ shrptr(value, count); break;
2282       default: ShouldNotReachHere();
2283     }
2284   } else {
2285     ShouldNotReachHere();
2286   }
2287 }
2288 
2289 
2290 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2291   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2292   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2293   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2294   __ movptr (Address(rsp, offset_from_rsp_in_bytes), r);
2295 }
2296 
2297 
2298 void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2299   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2300   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2301   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2302   __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
2303 }
2304 
2305 
2306 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
2307   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2308   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2309   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2310   __ movoop(Address(rsp, offset_from_rsp_in_bytes), o, rscratch1);
2311 }
2312 
2313 
2314 void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) {
2315   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2316   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2317   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2318   __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m, rscratch1);
2319 }
2320 
2321 
2322 // This code replaces a call to arraycopy; no exception may
2323 // be thrown in this code, they must be thrown in the System.arraycopy
2324 // activation frame; we could save some checks if this would not be the case
2325 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2326   ciArrayKlass* default_type = op->expected_type();
2327   Register src = op->src()->as_register();
2328   Register dst = op->dst()->as_register();
2329   Register src_pos = op->src_pos()->as_register();
2330   Register dst_pos = op->dst_pos()->as_register();
2331   Register length  = op->length()->as_register();
2332   Register tmp = op->tmp()->as_register();
2333   Register tmp_load_klass = rscratch1;
2334   Register tmp2 = UseCompactObjectHeaders ? rscratch2 : noreg;
2335 
2336   CodeStub* stub = op->stub();
2337   int flags = op->flags();
2338   BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
2339   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2340 
2341   // if we don't know anything, just go through the generic arraycopy
2342   if (default_type == nullptr) {
2343     // save outgoing arguments on stack in case call to System.arraycopy is needed
2344     // HACK ALERT. This code used to push the parameters in a hardwired fashion
2345     // for interpreter calling conventions. Now we have to do it in new style conventions.
2346     // For the moment until C1 gets the new register allocator I just force all the
2347     // args to the right place (except the register args) and then on the back side
2348     // reload the register args properly if we go slow path. Yuck
2349 
2350     // These are proper for the calling convention
2351     store_parameter(length, 2);
2352     store_parameter(dst_pos, 1);
2353     store_parameter(dst, 0);
2354 
2355     // these are just temporary placements until we need to reload
2356     store_parameter(src_pos, 3);
2357     store_parameter(src, 4);
2358 
2359     address copyfunc_addr = StubRoutines::generic_arraycopy();
2360     assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2361 
2362     // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint
2363     // The arguments are in java calling convention so we can trivially shift them to C
2364     // convention
2365     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2366     __ mov(c_rarg0, j_rarg0);
2367     assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2368     __ mov(c_rarg1, j_rarg1);
2369     assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
2370     __ mov(c_rarg2, j_rarg2);
2371     assert_different_registers(c_rarg3, j_rarg4);
2372     __ mov(c_rarg3, j_rarg3);
2373 #ifdef _WIN64
2374     // Allocate abi space for args but be sure to keep stack aligned
2375     __ subptr(rsp, 6*wordSize);
2376     store_parameter(j_rarg4, 4);
2377 #ifndef PRODUCT
2378     if (PrintC1Statistics) {
2379       __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1);
2380     }
2381 #endif
2382     __ call(RuntimeAddress(copyfunc_addr));
2383     __ addptr(rsp, 6*wordSize);
2384 #else
2385     __ mov(c_rarg4, j_rarg4);
2386 #ifndef PRODUCT
2387     if (PrintC1Statistics) {
2388       __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1);
2389     }
2390 #endif
2391     __ call(RuntimeAddress(copyfunc_addr));
2392 #endif // _WIN64
2393 
2394     __ testl(rax, rax);
2395     __ jcc(Assembler::equal, *stub->continuation());
2396 
2397     __ mov(tmp, rax);
2398     __ xorl(tmp, -1);
2399 
2400     // Reload values from the stack so they are where the stub
2401     // expects them.
2402     __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
2403     __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
2404     __ movptr   (length,  Address(rsp, 2*BytesPerWord));
2405     __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
2406     __ movptr   (src,     Address(rsp, 4*BytesPerWord));
2407 
2408     __ subl(length, tmp);
2409     __ addl(src_pos, tmp);
2410     __ addl(dst_pos, tmp);
2411     __ jmp(*stub->entry());
2412 
2413     __ bind(*stub->continuation());
2414     return;
2415   }
2416 
2417   assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2418 
2419   int elem_size = type2aelembytes(basic_type);
2420   Address::ScaleFactor scale;
2421 
2422   switch (elem_size) {
2423     case 1 :
2424       scale = Address::times_1;
2425       break;
2426     case 2 :
2427       scale = Address::times_2;
2428       break;
2429     case 4 :
2430       scale = Address::times_4;
2431       break;
2432     case 8 :
2433       scale = Address::times_8;
2434       break;
2435     default:
2436       scale = Address::no_scale;
2437       ShouldNotReachHere();
2438   }
2439 
2440   Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2441   Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2442 
2443   // length and pos's are all sign extended at this point on 64bit
2444 
2445   // test for null
2446   if (flags & LIR_OpArrayCopy::src_null_check) {
2447     __ testptr(src, src);
2448     __ jcc(Assembler::zero, *stub->entry());
2449   }
2450   if (flags & LIR_OpArrayCopy::dst_null_check) {
2451     __ testptr(dst, dst);
2452     __ jcc(Assembler::zero, *stub->entry());
2453   }
2454 
2455   // If the compiler was not able to prove that exact type of the source or the destination
2456   // of the arraycopy is an array type, check at runtime if the source or the destination is
2457   // an instance type.
2458   if (flags & LIR_OpArrayCopy::type_check) {
2459     if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2460       __ load_klass(tmp, dst, tmp_load_klass);
2461       __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
2462       __ jcc(Assembler::greaterEqual, *stub->entry());
2463     }
2464 
2465     if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2466       __ load_klass(tmp, src, tmp_load_klass);
2467       __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
2468       __ jcc(Assembler::greaterEqual, *stub->entry());
2469     }
2470   }
2471 
2472   // check if negative
2473   if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
2474     __ testl(src_pos, src_pos);
2475     __ jcc(Assembler::less, *stub->entry());
2476   }
2477   if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
2478     __ testl(dst_pos, dst_pos);
2479     __ jcc(Assembler::less, *stub->entry());
2480   }
2481 
2482   if (flags & LIR_OpArrayCopy::src_range_check) {
2483     __ lea(tmp, Address(src_pos, length, Address::times_1, 0));
2484     __ cmpl(tmp, src_length_addr);
2485     __ jcc(Assembler::above, *stub->entry());
2486   }
2487   if (flags & LIR_OpArrayCopy::dst_range_check) {
2488     __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
2489     __ cmpl(tmp, dst_length_addr);
2490     __ jcc(Assembler::above, *stub->entry());
2491   }
2492 
2493   if (flags & LIR_OpArrayCopy::length_positive_check) {
2494     __ testl(length, length);
2495     __ jcc(Assembler::less, *stub->entry());
2496   }
2497 
2498   __ movl2ptr(src_pos, src_pos); //higher 32bits must be null
2499   __ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null
2500 
2501   if (flags & LIR_OpArrayCopy::type_check) {
2502     // We don't know the array types are compatible
2503     if (basic_type != T_OBJECT) {
2504       // Simple test for basic type arrays
2505       __ cmp_klasses_from_objects(src, dst, tmp, tmp2);
2506       __ jcc(Assembler::notEqual, *stub->entry());
2507     } else {
2508       // For object arrays, if src is a sub class of dst then we can
2509       // safely do the copy.
2510       Label cont, slow;
2511 
2512       __ push_ppx(src);
2513       __ push_ppx(dst);
2514 
2515       __ load_klass(src, src, tmp_load_klass);
2516       __ load_klass(dst, dst, tmp_load_klass);
2517 
2518       __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr);
2519 
2520       __ push_ppx(src);
2521       __ push_ppx(dst);
2522       __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
2523       __ pop_ppx(dst);
2524       __ pop_ppx(src);
2525 
2526       __ testl(src, src);
2527       __ jcc(Assembler::notEqual, cont);
2528 
2529       __ bind(slow);
2530       __ pop_ppx(dst);
2531       __ pop_ppx(src);
2532 
2533       address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2534       if (copyfunc_addr != nullptr) { // use stub if available
2535         // src is not a sub class of dst so we have to do a
2536         // per-element check.
2537 
2538         int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2539         if ((flags & mask) != mask) {
2540           // Check that at least both of them object arrays.
2541           assert(flags & mask, "one of the two should be known to be an object array");
2542 
2543           if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2544             __ load_klass(tmp, src, tmp_load_klass);
2545           } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2546             __ load_klass(tmp, dst, tmp_load_klass);
2547           }
2548           int lh_offset = in_bytes(Klass::layout_helper_offset());
2549           Address klass_lh_addr(tmp, lh_offset);
2550           jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2551           __ cmpl(klass_lh_addr, objArray_lh);
2552           __ jcc(Assembler::notEqual, *stub->entry());
2553         }
2554 
2555        // Spill because stubs can use any register they like and it's
2556        // easier to restore just those that we care about.
2557        store_parameter(dst, 0);
2558        store_parameter(dst_pos, 1);
2559        store_parameter(length, 2);
2560        store_parameter(src_pos, 3);
2561        store_parameter(src, 4);
2562 
2563         __ movl2ptr(length, length); //higher 32bits must be null
2564 
2565         __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
2566         assert_different_registers(c_rarg0, dst, dst_pos, length);
2567         __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
2568         assert_different_registers(c_rarg1, dst, length);
2569 
2570         __ mov(c_rarg2, length);
2571         assert_different_registers(c_rarg2, dst);
2572 
2573 #ifdef _WIN64
2574         // Allocate abi space for args but be sure to keep stack aligned
2575         __ subptr(rsp, 6*wordSize);
2576         __ load_klass(c_rarg3, dst, tmp_load_klass);
2577         __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset()));
2578         store_parameter(c_rarg3, 4);
2579         __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));
2580         __ call(RuntimeAddress(copyfunc_addr));
2581         __ addptr(rsp, 6*wordSize);
2582 #else
2583         __ load_klass(c_rarg4, dst, tmp_load_klass);
2584         __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
2585         __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
2586         __ call(RuntimeAddress(copyfunc_addr));
2587 #endif
2588 
2589 #ifndef PRODUCT
2590         if (PrintC1Statistics) {
2591           Label failed;
2592           __ testl(rax, rax);
2593           __ jcc(Assembler::notZero, failed);
2594           __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt), rscratch1);
2595           __ bind(failed);
2596         }
2597 #endif
2598 
2599         __ testl(rax, rax);
2600         __ jcc(Assembler::zero, *stub->continuation());
2601 
2602 #ifndef PRODUCT
2603         if (PrintC1Statistics) {
2604           __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt), rscratch1);
2605         }
2606 #endif
2607 
2608         __ mov(tmp, rax);
2609 
2610         __ xorl(tmp, -1);
2611 
2612         // Restore previously spilled arguments
2613         __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
2614         __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
2615         __ movptr   (length,  Address(rsp, 2*BytesPerWord));
2616         __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
2617         __ movptr   (src,     Address(rsp, 4*BytesPerWord));
2618 
2619 
2620         __ subl(length, tmp);
2621         __ addl(src_pos, tmp);
2622         __ addl(dst_pos, tmp);
2623       }
2624 
2625       __ jmp(*stub->entry());
2626 
2627       __ bind(cont);
2628       __ pop(dst);
2629       __ pop(src);
2630     }
2631   }
2632 
2633 #ifdef ASSERT
2634   if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2635     // Sanity check the known type with the incoming class.  For the
2636     // primitive case the types must match exactly with src.klass and
2637     // dst.klass each exactly matching the default type.  For the
2638     // object array case, if no type check is needed then either the
2639     // dst type is exactly the expected type and the src type is a
2640     // subtype which we can't check or src is the same array as dst
2641     // but not necessarily exactly of type default_type.
2642     Label known_ok, halt;
2643     __ mov_metadata(tmp, default_type->constant_encoding());
2644     if (UseCompressedClassPointers) {
2645       __ encode_klass_not_null(tmp, rscratch1);
2646     }
2647 
2648     if (basic_type != T_OBJECT) {
2649       __ cmp_klass(tmp, dst, tmp2);
2650       __ jcc(Assembler::notEqual, halt);
2651       __ cmp_klass(tmp, src, tmp2);
2652       __ jcc(Assembler::equal, known_ok);
2653     } else {
2654       __ cmp_klass(tmp, dst, tmp2);
2655       __ jcc(Assembler::equal, known_ok);
2656       __ cmpptr(src, dst);
2657       __ jcc(Assembler::equal, known_ok);
2658     }
2659     __ bind(halt);
2660     __ stop("incorrect type information in arraycopy");
2661     __ bind(known_ok);
2662   }
2663 #endif
2664 
2665 #ifndef PRODUCT
2666   if (PrintC1Statistics) {
2667     __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)), rscratch1);
2668   }
2669 #endif
2670 
2671   assert_different_registers(c_rarg0, dst, dst_pos, length);
2672   __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
2673   assert_different_registers(c_rarg1, length);
2674   __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
2675   __ mov(c_rarg2, length);
2676 
2677   bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2678   bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
2679   const char *name;
2680   address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2681   __ call_VM_leaf(entry, 0);
2682 
2683   if (stub != nullptr) {
2684     __ bind(*stub->continuation());
2685   }
2686 }
2687 
2688 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
2689   assert(op->crc()->is_single_cpu(),  "crc must be register");
2690   assert(op->val()->is_single_cpu(),  "byte value must be register");
2691   assert(op->result_opr()->is_single_cpu(), "result must be register");
2692   Register crc = op->crc()->as_register();
2693   Register val = op->val()->as_register();
2694   Register res = op->result_opr()->as_register();
2695 
2696   assert_different_registers(val, crc, res);
2697 
2698   __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));
2699   __ notl(crc); // ~crc
2700   __ update_byte_crc32(crc, val, res);
2701   __ notl(crc); // ~crc
2702   __ mov(res, crc);
2703 }
2704 
2705 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2706   Register obj = op->obj_opr()->as_register();  // may not be an oop
2707   Register hdr = op->hdr_opr()->as_register();
2708   Register lock = op->lock_opr()->as_register();
2709   if (op->code() == lir_lock) {
2710     Register tmp = op->scratch_opr()->as_register();
2711     // add debug info for NullPointerException only if one is possible
2712     int null_check_offset = __ lock_object(hdr, obj, lock, tmp, *op->stub()->entry());
2713     if (op->info() != nullptr) {
2714       add_debug_info_for_null_check(null_check_offset, op->info());
2715     }
2716     // done
2717   } else if (op->code() == lir_unlock) {
2718     __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2719   } else {
2720     Unimplemented();
2721   }
2722   __ bind(*op->stub()->continuation());
2723 }
2724 
2725 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
2726   Register obj = op->obj()->as_pointer_register();
2727   Register result = op->result_opr()->as_pointer_register();
2728 
2729   CodeEmitInfo* info = op->info();
2730   if (info != nullptr) {
2731     add_debug_info_for_null_check_here(info);
2732   }
2733 
2734   __ load_klass(result, obj, rscratch1);
2735 }
2736 
2737 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2738   ciMethod* method = op->profiled_method();
2739   int bci          = op->profiled_bci();
2740   ciMethod* callee = op->profiled_callee();
2741   Register tmp_load_klass = rscratch1;
2742 
2743   // Update counter for all call types
2744   ciMethodData* md = method->method_data_or_null();
2745   assert(md != nullptr, "Sanity");
2746   ciProfileData* data = md->bci_to_data(bci);
2747   assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
2748   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
2749   Register mdo  = op->mdo()->as_register();
2750   __ mov_metadata(mdo, md->constant_encoding());
2751   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
2752   // Perform additional virtual call profiling for invokevirtual and
2753   // invokeinterface bytecodes
2754   if (op->should_profile_receiver_type()) {
2755     assert(op->recv()->is_single_cpu(), "recv must be allocated");
2756     Register recv = op->recv()->as_register();
2757     assert_different_registers(mdo, recv);
2758     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
2759     ciKlass* known_klass = op->known_holder();
2760     if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
2761       // We know the type that will be seen at this call site; we can
2762       // statically update the MethodData* rather than needing to do
2763       // dynamic tests on the receiver type.
2764       ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
2765       for (uint i = 0; i < VirtualCallData::row_limit(); i++) {
2766         ciKlass* receiver = vc_data->receiver(i);
2767         if (known_klass->equals(receiver)) {
2768           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
2769           __ addptr(data_addr, DataLayout::counter_increment);
2770           return;
2771         }
2772       }
2773       // Receiver type is not found in profile data.
2774       // Fall back to runtime helper to handle the rest at runtime.
2775       __ mov_metadata(recv, known_klass->constant_encoding());
2776     } else {
2777       __ load_klass(recv, recv, tmp_load_klass);
2778     }
2779     type_profile_helper(mdo, md, data, recv);
2780   } else {
2781     // Static call
2782     __ addptr(counter_addr, DataLayout::counter_increment);
2783   }
2784 }
2785 
2786 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
2787   Register obj = op->obj()->as_register();
2788   Register tmp = op->tmp()->as_pointer_register();
2789   Register tmp_load_klass = rscratch1;
2790   Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
2791   ciKlass* exact_klass = op->exact_klass();
2792   intptr_t current_klass = op->current_klass();
2793   bool not_null = op->not_null();
2794   bool no_conflict = op->no_conflict();
2795 
2796   Label update, next, none;
2797 
2798   bool do_null = !not_null;
2799   bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
2800   bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
2801 
2802   assert(do_null || do_update, "why are we here?");
2803   assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
2804 
2805   __ verify_oop(obj);
2806 
2807 #ifdef ASSERT
2808   if (obj == tmp) {
2809     assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index());
2810   } else {
2811     assert_different_registers(obj, tmp, rscratch1, mdo_addr.base(), mdo_addr.index());
2812   }
2813 #endif
2814   if (do_null) {
2815     __ testptr(obj, obj);
2816     __ jccb(Assembler::notZero, update);
2817     if (!TypeEntries::was_null_seen(current_klass)) {
2818       __ testptr(mdo_addr, TypeEntries::null_seen);
2819 #ifndef ASSERT
2820       __ jccb(Assembler::notZero, next); // already set
2821 #else
2822       __ jcc(Assembler::notZero, next); // already set
2823 #endif
2824       // atomic update to prevent overwriting Klass* with 0
2825       __ lock();
2826       __ orptr(mdo_addr, TypeEntries::null_seen);
2827     }
2828     if (do_update) {
2829 #ifndef ASSERT
2830       __ jmpb(next);
2831     }
2832 #else
2833       __ jmp(next);
2834     }
2835   } else {
2836     __ testptr(obj, obj);
2837     __ jcc(Assembler::notZero, update);
2838     __ stop("unexpected null obj");
2839 #endif
2840   }
2841 
2842   __ bind(update);
2843 
2844   if (do_update) {
2845 #ifdef ASSERT
2846     if (exact_klass != nullptr) {
2847       Label ok;
2848       __ load_klass(tmp, obj, tmp_load_klass);
2849       __ push_ppx(tmp);
2850       __ mov_metadata(tmp, exact_klass->constant_encoding());
2851       __ cmpptr(tmp, Address(rsp, 0));
2852       __ jcc(Assembler::equal, ok);
2853       __ stop("exact klass and actual klass differ");
2854       __ bind(ok);
2855       __ pop_ppx(tmp);
2856     }
2857 #endif
2858     if (!no_conflict) {
2859       if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
2860         if (exact_klass != nullptr) {
2861           __ mov_metadata(tmp, exact_klass->constant_encoding());
2862         } else {
2863           __ load_klass(tmp, obj, tmp_load_klass);
2864         }
2865         __ mov(rscratch1, tmp); // save original value before XOR
2866         __ xorptr(tmp, mdo_addr);
2867         __ testptr(tmp, TypeEntries::type_klass_mask);
2868         // klass seen before, nothing to do. The unknown bit may have been
2869         // set already but no need to check.
2870         __ jccb(Assembler::zero, next);
2871 
2872         __ testptr(tmp, TypeEntries::type_unknown);
2873         __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
2874 
2875         if (TypeEntries::is_type_none(current_klass)) {
2876           __ testptr(mdo_addr, TypeEntries::type_mask);
2877           __ jccb(Assembler::zero, none);
2878           // There is a chance that the checks above (re-reading profiling
2879           // data from memory) fail if another thread has just set the
2880           // profiling to this obj's klass
2881           __ mov(tmp, rscratch1); // get back original value before XOR
2882           __ xorptr(tmp, mdo_addr);
2883           __ testptr(tmp, TypeEntries::type_klass_mask);
2884           __ jccb(Assembler::zero, next);
2885         }
2886       } else {
2887         assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2888                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
2889 
2890         __ testptr(mdo_addr, TypeEntries::type_unknown);
2891         __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
2892       }
2893 
2894       // different than before. Cannot keep accurate profile.
2895       __ orptr(mdo_addr, TypeEntries::type_unknown);
2896 
2897       if (TypeEntries::is_type_none(current_klass)) {
2898         __ jmpb(next);
2899 
2900         __ bind(none);
2901         // first time here. Set profile type.
2902         __ movptr(mdo_addr, tmp);
2903 #ifdef ASSERT
2904         __ andptr(tmp, TypeEntries::type_klass_mask);
2905         __ verify_klass_ptr(tmp);
2906 #endif
2907       }
2908     } else {
2909       // There's a single possible klass at this profile point
2910       assert(exact_klass != nullptr, "should be");
2911       if (TypeEntries::is_type_none(current_klass)) {
2912         __ mov_metadata(tmp, exact_klass->constant_encoding());
2913         __ xorptr(tmp, mdo_addr);
2914         __ testptr(tmp, TypeEntries::type_klass_mask);
2915 #ifdef ASSERT
2916         __ jcc(Assembler::zero, next);
2917 
2918         {
2919           Label ok;
2920           __ push_ppx(tmp);
2921           __ testptr(mdo_addr, TypeEntries::type_mask);
2922           __ jcc(Assembler::zero, ok);
2923           // may have been set by another thread
2924           __ mov_metadata(tmp, exact_klass->constant_encoding());
2925           __ xorptr(tmp, mdo_addr);
2926           __ testptr(tmp, TypeEntries::type_mask);
2927           __ jcc(Assembler::zero, ok);
2928 
2929           __ stop("unexpected profiling mismatch");
2930           __ bind(ok);
2931           __ pop_ppx(tmp);
2932         }
2933 #else
2934         __ jccb(Assembler::zero, next);
2935 #endif
2936         // first time here. Set profile type.
2937         __ movptr(mdo_addr, tmp);
2938 #ifdef ASSERT
2939         __ andptr(tmp, TypeEntries::type_klass_mask);
2940         __ verify_klass_ptr(tmp);
2941 #endif
2942       } else {
2943         assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2944                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2945 
2946         __ testptr(mdo_addr, TypeEntries::type_unknown);
2947         __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
2948 
2949         __ orptr(mdo_addr, TypeEntries::type_unknown);
2950       }
2951     }
2952   }
2953   __ bind(next);
2954 }
2955 
2956 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
2957   __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
2958 }
2959 
2960 
2961 void LIR_Assembler::align_backward_branch_target() {
2962   __ align(BytesPerWord);
2963 }
2964 
2965 
2966 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2967   if (left->is_single_cpu()) {
2968     __ negl(left->as_register());
2969     move_regs(left->as_register(), dest->as_register());
2970 
2971   } else if (left->is_double_cpu()) {
2972     Register lo = left->as_register_lo();
2973     Register dst = dest->as_register_lo();
2974     __ movptr(dst, lo);
2975     __ negptr(dst);
2976 
2977   } else if (dest->is_single_xmm()) {
2978     assert(!tmp->is_valid(), "do not need temporary");
2979     if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
2980       __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
2981     }
2982     __ xorps(dest->as_xmm_float_reg(),
2983              ExternalAddress((address)float_signflip_pool),
2984              rscratch1);
2985   } else if (dest->is_double_xmm()) {
2986     assert(!tmp->is_valid(), "do not need temporary");
2987     if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
2988       __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
2989     }
2990     __ xorpd(dest->as_xmm_double_reg(),
2991              ExternalAddress((address)double_signflip_pool),
2992              rscratch1);
2993   } else {
2994     ShouldNotReachHere();
2995   }
2996 }
2997 
2998 
2999 void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
3000   assert(src->is_address(), "must be an address");
3001   assert(dest->is_register(), "must be a register");
3002 
3003   PatchingStub* patch = nullptr;
3004   if (patch_code != lir_patch_none) {
3005     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
3006   }
3007 
3008   Register reg = dest->as_pointer_register();
3009   LIR_Address* addr = src->as_address_ptr();
3010   __ lea(reg, as_Address(addr));
3011 
3012   if (patch != nullptr) {
3013     patching_epilog(patch, patch_code, addr->base()->as_register(), info);
3014   }
3015 }
3016 
3017 
3018 
3019 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
3020   assert(!tmp->is_valid(), "don't need temporary");
3021   __ call(RuntimeAddress(dest));
3022   if (info != nullptr) {
3023     add_call_info_here(info);
3024   }
3025   __ post_call_nop();
3026 }
3027 
3028 
3029 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
3030   assert(type == T_LONG, "only for volatile long fields");
3031 
3032   if (info != nullptr) {
3033     add_debug_info_for_null_check_here(info);
3034   }
3035 
3036   if (src->is_double_xmm()) {
3037     if (dest->is_double_cpu()) {
3038       __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
3039     } else if (dest->is_double_stack()) {
3040       __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());
3041     } else if (dest->is_address()) {
3042       __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg());
3043     } else {
3044       ShouldNotReachHere();
3045     }
3046 
3047   } else if (dest->is_double_xmm()) {
3048     if (src->is_double_stack()) {
3049       __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix()));
3050     } else if (src->is_address()) {
3051       __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr()));
3052     } else {
3053       ShouldNotReachHere();
3054     }
3055 
3056   } else {
3057     ShouldNotReachHere();
3058   }
3059 }
3060 
3061 #ifdef ASSERT
3062 // emit run-time assertion
3063 void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
3064   assert(op->code() == lir_assert, "must be");
3065 
3066   if (op->in_opr1()->is_valid()) {
3067     assert(op->in_opr2()->is_valid(), "both operands must be valid");
3068     comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
3069   } else {
3070     assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
3071     assert(op->condition() == lir_cond_always, "no other conditions allowed");
3072   }
3073 
3074   Label ok;
3075   if (op->condition() != lir_cond_always) {
3076     Assembler::Condition acond = Assembler::zero;
3077     switch (op->condition()) {
3078       case lir_cond_equal:        acond = Assembler::equal;       break;
3079       case lir_cond_notEqual:     acond = Assembler::notEqual;    break;
3080       case lir_cond_less:         acond = Assembler::less;        break;
3081       case lir_cond_lessEqual:    acond = Assembler::lessEqual;   break;
3082       case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
3083       case lir_cond_greater:      acond = Assembler::greater;     break;
3084       case lir_cond_belowEqual:   acond = Assembler::belowEqual;  break;
3085       case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;  break;
3086       default:                    ShouldNotReachHere();
3087     }
3088     __ jcc(acond, ok);
3089   }
3090   if (op->halt()) {
3091     const char* str = __ code_string(op->msg());
3092     __ stop(str);
3093   } else {
3094     breakpoint();
3095   }
3096   __ bind(ok);
3097 }
3098 #endif
3099 
3100 void LIR_Assembler::membar() {
3101   // QQQ sparc TSO uses this,
3102   __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad));
3103 }
3104 
3105 void LIR_Assembler::membar_acquire() {
3106   // No x86 machines currently require load fences
3107 }
3108 
3109 void LIR_Assembler::membar_release() {
3110   // No x86 machines currently require store fences
3111 }
3112 
3113 void LIR_Assembler::membar_loadload() {
3114   // no-op
3115   //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
3116 }
3117 
3118 void LIR_Assembler::membar_storestore() {
3119   // no-op
3120   //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
3121 }
3122 
3123 void LIR_Assembler::membar_loadstore() {
3124   // no-op
3125   //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
3126 }
3127 
3128 void LIR_Assembler::membar_storeload() {
3129   __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
3130 }
3131 
3132 void LIR_Assembler::on_spin_wait() {
3133   __ pause ();
3134 }
3135 
3136 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
3137   assert(result_reg->is_register(), "check");
3138   __ mov(result_reg->as_register(), r15_thread);
3139 }
3140 
3141 
3142 void LIR_Assembler::peephole(LIR_List*) {
3143   // do nothing for now
3144 }
3145 
3146 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
3147   assert(data == dest, "xchg/xadd uses only 2 operands");
3148 
3149   if (data->type() == T_INT) {
3150     if (code == lir_xadd) {
3151       __ lock();
3152       __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
3153     } else {
3154       __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
3155     }
3156   } else if (data->is_oop()) {
3157     assert (code == lir_xchg, "xadd for oops");
3158     Register obj = data->as_register();
3159     if (UseCompressedOops) {
3160       __ encode_heap_oop(obj);
3161       __ xchgl(obj, as_Address(src->as_address_ptr()));
3162       __ decode_heap_oop(obj);
3163     } else {
3164       __ xchgptr(obj, as_Address(src->as_address_ptr()));
3165     }
3166   } else if (data->type() == T_LONG) {
3167     assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
3168     if (code == lir_xadd) {
3169       __ lock();
3170       __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
3171     } else {
3172       __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
3173     }
3174   } else {
3175     ShouldNotReachHere();
3176   }
3177 }
3178 
3179 #undef __