/*
 * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciInlineKlass.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_x86.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

// Item will be loaded into a byte register; Intel only
void LIRItem::load_byte_item() {
  load_item();
  LIR_Opr res = result();

  if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
    // make sure that it is a byte register
    assert(!value()->type()->is_float() && !value()->type()->is_double(),
           "can't load floats in byte register");
    LIR_Opr reg = _gen->rlock_byte(T_BYTE);
    __ move(res, reg);

    _result = reg;
  }
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (r->is_constant()) {
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::rax_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::rdx_opr; }
LIR_Opr LIRGenerator::divInOpr()        { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::divOutOpr()       { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::remOutOpr()       { return FrameMap::rdx_opr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { return FrameMap::rcx_opr; }
LIR_Opr LIRGenerator::syncLockOpr()     { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }


LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:     opr = FrameMap::rax_opr;          break;
    case objectTag:  opr = FrameMap::rax_oop_opr;      break;
    case longTag:    opr = FrameMap::long0_opr;        break;
#ifdef _LP64
    case floatTag:   opr = FrameMap::xmm0_float_opr;   break;
    case doubleTag:  opr = FrameMap::xmm0_double_opr;  break;
#else
    case floatTag:   opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr  : FrameMap::fpu0_float_opr;   break;
    case doubleTag:  opr = UseSSE >= 2 ? FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr;  break;
#endif // _LP64
    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}


void LIRGenerator::init_temps_for_substitutability_check(LIR_Opr& tmp1, LIR_Opr& tmp2) {
  // We just need one 32-bit temp register for x86/x64, to check whether both
  // oops have markWord::always_locked_pattern. See LIR_Assembler::emit_opSubstitutabilityCheck().
  // @temp = %r10d
  // mov $0x405, %r10d
  // and (%left), %r10d   /* if need to check left */
  // and (%right), %r10d  /* if need to check right */
  // cmp $0x405, %r10d
  // jne L_oops_not_equal
  tmp1 = new_register(T_INT);
  tmp2 = LIR_OprFact::illegalOpr;
}

//--------- loading items into registers --------------------------------


// i486 instructions can inline constants
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (type == T_SHORT || type == T_CHAR) {
    return false;
  }
  Constant* c = v->as_Constant();
  if (c && c->state_before() == nullptr) {
    // constants of any type can be stored directly, except for
    // unloaded object constants.
    return true;
  }
  return false;
}


bool LIRGenerator::can_inline_as_constant(Value v) const {
  if (v->type()->tag() == longTag) return false;
  return v->type()->tag() != objectTag ||
    (v->type()->is_constant() && v->type()->as_ObjectType()->constant_value()->is_null_object());
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  if (c->type() == T_LONG) return false;
  return c->type() != T_OBJECT || c->as_jobject() == nullptr;
}


LIR_Opr LIRGenerator::safepoint_poll_register() {
  NOT_LP64( return new_register(T_ADDRESS); )
  return LIR_OprFact::illegalOpr;
}


LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  if (index->is_constant()) {
    LIR_Const *constant = index->as_constant_ptr();
#ifdef _LP64
    jlong c;
    if (constant->type() == T_INT) {
      c = (jlong(index->as_jint()) << shift) + disp;
    } else {
      assert(constant->type() == T_LONG, "should be");
      c = (index->as_jlong() << shift) + disp;
    }
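    // Fold the scaled constant index into the displacement only if the sum
    // still fits in the 32-bit displacement field of an x86 addressing mode;
    // otherwise materialize the index in a temporary register.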
    if ((jlong)((jint)c) == c) {
      return new LIR_Address(base, (jint)c, type);
    } else {
      LIR_Opr tmp = new_register(T_LONG);
      __ move(index, tmp);
      return new LIR_Address(base, tmp, type);
    }
#else
    return new LIR_Address(base,
                           ((intx)(constant->as_jint()) << shift) + disp,
                           type);
#endif
  } else {
    return new LIR_Address(base, index, (LIR_Address::Scale)shift, disp, type);
  }
}


LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    int elem_size = type2aelembytes(type);
#ifdef _LP64
    jint index = index_opr->as_jint();
    jlong disp = offset_in_bytes + (jlong)(index) * elem_size;
    if (disp > max_jint) {
      // Displacement overflow. Cannot directly use instruction with 32-bit displacement for 64-bit addresses.
      // Convert array index to long to do array offset computation with 64-bit values.
      index_opr = new_register(T_LONG);
      __ move(LIR_OprFact::longConst(index), index_opr);
      addr = new LIR_Address(array_opr, index_opr, LIR_Address::scale(type), offset_in_bytes, type);
    } else {
      addr = new LIR_Address(array_opr, (intx)disp, type);
    }
#else
    // A displacement overflow can also occur for x86 but that is not a problem due to the 32-bit address range!
    // Let's assume an array 'a' and an access with displacement 'disp'. When disp overflows, then "a + disp" will
    // always be negative (i.e. underflows the 32-bit address range):
    // Let N = 2^32: a + signed_overflow(disp) = a + disp - N.
    // "a + disp" is always smaller than N. If an index was chosen which would point to an address beyond N, then
    // range checks would catch that and throw an exception. Thus, a + disp < 0 holds, which means that it always
    // underflows the 32-bit address range:
    // unsigned_underflow(a + signed_overflow(disp)) = unsigned_underflow(a + disp - N)
    //                                               = (a + disp - N) + N = a + disp
    // This shows that we still end up at the correct address with a displacement overflow due to the 32-bit address
    // range limitation. This overflow only needs to be handled if addresses can be larger, as on 64-bit platforms.
    addr = new LIR_Address(array_opr, offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
#endif // _LP64
  } else {
#ifdef _LP64
    if (index_opr->type() == T_INT) {
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
#endif // _LP64
    addr = new LIR_Address(array_opr,
                           index_opr,
                           LIR_Address::scale(type),
                           offset_in_bytes, type);
  }
  return addr;
}


LIR_Opr LIRGenerator::load_immediate(jlong x, BasicType type) {
  LIR_Opr r;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(checked_cast<jint>(x));
  } else {
    ShouldNotReachHere();
  }
  return r;
}

void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  __ add((LIR_Opr)addr, LIR_OprFact::intConst(step), (LIR_Opr)addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  __ cmp_mem_int(condition, base, disp, c, info);
}


void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
}

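// Strength-reduce a multiplication by a constant adjacent to a power of two
// into a shift plus an add/sub, e.g. x * 7 = (x << 3) - x and
// x * 9 = (x << 3) + x. Returns false if no reduction applies.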
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
  if (tmp->is_valid() && c > 0 && c < max_jint) {
    if (is_power_of_2(c + 1)) {
      __ move(left, tmp);
      __ shift_left(left, log2i_exact(c + 1), left);
      __ sub(left, tmp, result);
      return true;
    } else if (is_power_of_2(c - 1)) {
      __ move(left, tmp);
      __ shift_left(left, log2i_exact(c - 1), left);
      __ add(left, tmp, result);
      return true;
    }
  }
  return false;
}


void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::rsp_opr, in_bytes(offset_from_sp), type));
}

void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = new_register(objectType);
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}

//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for inline types on x86
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if ((LockingMode == LM_LIGHTWEIGHT) ||
      (EnableValhalla && x->maybe_inlinetype())) {
    scratch = new_register(T_ADDRESS);
  }

  CodeEmitInfo* info_for_exception = nullptr;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }

  CodeStub* throw_imse_stub = x->maybe_inlinetype() ?
      new SimpleExceptionStub(Runtime1::throw_illegal_monitor_state_exception_id,
                              LIR_OprFact::illegalOpr, state_for(x))
    : nullptr;

  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info, throw_imse_stub);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(), "");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}

// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem value(x->x(), this);
  value.set_destroys_register();
  value.load_item();
  LIR_Opr reg = rlock(x);

  LIR_Opr tmp = LIR_OprFact::illegalOpr;
#ifdef _LP64
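  // The backend negates floats/doubles by XORing with a sign-bit mask;
  // without AVX-512VL the -0.0 mask must be preloaded into a temp register.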
  if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
    if (x->type()->tag() == doubleTag) {
      tmp = new_register(T_DOUBLE);
      __ move(LIR_OprFact::doubleConst(-0.0), tmp);
    } else if (x->type()->tag() == floatTag) {
      tmp = new_register(T_FLOAT);
      __ move(LIR_OprFact::floatConst(-0.0), tmp);
    }
  }
#endif
  __ negate(value.result(), reg, tmp);

  set_result(x, round_item(reg));
}

// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg  = &left;
  LIRItem* right_arg = &right;
  assert(!left.is_stack() || !right.is_stack(), "can't both be memory operands");
  bool must_load_both = (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem);
  if (left.is_register() || x->x()->type()->is_constant() || must_load_both) {
    left.load_item();
  } else {
    left.dont_load_item();
  }

#ifndef _LP64
  // do not load right operand if it is a constant. only 0 and 1 are
  // loaded because there are special instructions for loading them
  // without memory access (not needed for SSE2 instructions)
  bool must_load_right = false;
  if (right.is_constant()) {
    LIR_Const* c = right.result()->as_constant_ptr();
    assert(c != nullptr, "invalid constant");
    assert(c->type() == T_FLOAT || c->type() == T_DOUBLE, "invalid type");

    if (c->type() == T_FLOAT) {
      must_load_right = UseSSE < 1 && (c->is_one_float() || c->is_zero_float());
    } else {
      must_load_right = UseSSE < 2 && (c->is_one_double() || c->is_zero_double());
    }
  }
#endif // !LP64

  if (must_load_both) {
    // frem and drem also destroy the right operand, so move it to a new register
    right.set_destroys_register();
    right.load_item();
  } else if (right.is_register()) {
    right.load_item();
#ifndef _LP64
  } else if (must_load_right) {
    right.load_item();
#endif // !LP64
  } else {
    right.dont_load_item();
  }
  LIR_Opr reg = rlock(x);
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  if (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv) {
    tmp = new_register(T_DOUBLE);
  }

#ifdef _LP64
  if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
    // frem and drem are implemented as a direct call into the runtime.
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    BasicType bt = as_BasicType(x->type());
    BasicTypeList signature(2);
    signature.append(bt);
    signature.append(bt);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(0));
    right.load_item_force(cc->at(1));

    address entry = nullptr;
    switch (x->op()) {
      case Bytecodes::_frem:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
        break;
      case Bytecodes::_drem:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
        break;
      default:
        ShouldNotReachHere();
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);
  } else {
    arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), tmp);
    set_result(x, round_item(reg));
  }
#else
  if ((UseSSE >= 1 && x->op() == Bytecodes::_frem) || (UseSSE >= 2 && x->op() == Bytecodes::_drem)) {
    // special handling for frem and drem: no SSE instruction, so must use FPU with temporary fpu stack slots
    LIR_Opr fpu0, fpu1;
    if (x->op() == Bytecodes::_frem) {
      fpu0 = LIR_OprFact::single_fpu(0);
      fpu1 = LIR_OprFact::single_fpu(1);
    } else {
      fpu0 = LIR_OprFact::double_fpu(0);
      fpu1 = LIR_OprFact::double_fpu(1);
    }
    __ move(right.result(), fpu1); // order of left and right operand is important!
    __ move(left.result(), fpu0);
    __ rem(fpu0, fpu1, fpu0);
    __ move(fpu0, reg);

  } else {
    arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), tmp);
  }
  set_result(x, round_item(reg));
#endif // _LP64
}


// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
    // long division is implemented as a direct call into the runtime
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    // the check for division by zero destroys the right operand
    right.set_destroys_register();

    BasicTypeList signature(2);
    signature.append(T_LONG);
    signature.append(T_LONG);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    // check for division by zero (destroys registers of right operand!)
    CodeEmitInfo* info = state_for(x);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(1));
    right.load_item();

    __ move(right.result(), cc->at(0));

    __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, new DivByZeroStub(info));

    address entry = nullptr;
    switch (x->op()) {
    case Bytecodes::_lrem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
      break; // the divisor == 0 check is done above
    case Bytecodes::_ldiv:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
      break; // the divisor == 0 check is done above
    default:
      ShouldNotReachHere();
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);
  } else if (x->op() == Bytecodes::_lmul) {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    // right register is destroyed by the long mul, so it must be
    // copied to a new register.
    right.set_destroys_register();

    left.load_item();
    right.load_item();

    LIR_Opr reg = FrameMap::long0_opr;
    arithmetic_op_long(x->op(), reg, left.result(), right.result(), nullptr);
    LIR_Opr result = rlock_result(x);
    __ move(reg, result);
  } else {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    left.load_item();
    // don't load constants to save register
    right.load_nonconstant();
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), nullptr);
  }
}


// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {
    // The requirements for division and modulo
    // input : rax: dividend                         min_int
    //         reg: divisor   (may not be rax/rdx)   -1
    //
    // output: rax: quotient  (= rax idiv reg)       min_int
    //         rdx: remainder (= rax irem reg)       0

    // rax and rdx will be destroyed

    // Note: does this invalidate the spec ???
    LIRItem right(x->y(), this);
    LIRItem left(x->x(), this);   // visit left second, so that the is_register test is valid

    // call state_for before load_item_force because state_for may
    // force the evaluation of other instructions that are needed for
    // correct debug info.  Otherwise the live range of the fixed
    // register might be too long.
    CodeEmitInfo* info = state_for(x);

    left.load_item_force(divInOpr());

    right.load_item();

    LIR_Opr result = rlock_result(x);
    LIR_Opr result_reg;
    if (x->op() == Bytecodes::_idiv) {
      result_reg = divOutOpr();
    } else {
      result_reg = remOutOpr();
    }

    if (!ImplicitDiv0Checks) {
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(info));
      // Idiv/irem cannot trap (passing info would generate an assertion).
      info = nullptr;
    }
    LIR_Opr tmp = FrameMap::rdx_opr; // idiv and irem use rdx in their implementation
    if (x->op() == Bytecodes::_irem) {
      __ irem(left.result(), right.result(), result_reg, tmp, info);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left.result(), right.result(), result_reg, tmp, info);
    } else {
      ShouldNotReachHere();
    }

    __ move(result_reg, result);
  } else {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(),  this);
    LIRItem right(x->y(), this);
    LIRItem* left_arg = &left;
    LIRItem* right_arg = &right;
    if (x->is_commutative() && left.is_stack() && right.is_register()) {
      // swap them if left is real stack (or cached) and right is real register (not cached)
      left_arg = &right;
      right_arg = &left;
    }

    left_arg->load_item();

    // do not need to load right, as we can handle stack and constants
    if (x->op() == Bytecodes::_imul) {
      // check if we can use shift instead
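      // 2^n multipliers reduce to a single shift; 2^n +/- 1 multipliers also
      // need a temp register for the add/sub (see strength_reduce_multiply).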
      bool use_constant = false;
      bool use_tmp = false;
      if (right_arg->is_constant()) {
        jint iconst = right_arg->get_jint_constant();
        if (iconst > 0 && iconst < max_jint) {
          if (is_power_of_2(iconst)) {
            use_constant = true;
          } else if (is_power_of_2(iconst - 1) || is_power_of_2(iconst + 1)) {
            use_constant = true;
            use_tmp = true;
          }
        }
      }
      if (use_constant) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      if (use_tmp) {
        tmp = new_register(T_INT);
      }
      rlock_result(x);

      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    } else {
      right_arg->dont_load_item();
      rlock_result(x);
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    }
  }
}


void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == nullptr && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag:  do_ArithmeticOp_FPU(x);  return;
    case longTag:    do_ArithmeticOp_Long(x); return;
    case intTag:     do_ArithmeticOp_Int(x);  return;
    default:         ShouldNotReachHere();    return;
  }
}


// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  // count must always be in rcx
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);

  ValueTag elemType = x->type()->tag();
  bool must_load_count = !count.is_constant() || elemType == longTag;
  if (must_load_count) {
    // count for long must be in register
    count.load_item_force(shiftCountOpr());
  } else {
    count.dont_load_item();
  }
  value.load_item();
  LIR_Opr reg = rlock_result(x);

  shift_op(x->op(), reg, value.result(), count.result(), LIR_OprFact::illegalOpr);
}


// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == nullptr && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();
  right.load_nonconstant();
  LIR_Opr reg = rlock_result(x);

  logic_op(x->op(), reg, left.result(), right.result());
}


// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}

LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
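  // x86 CMPXCHG compares against rax (cmpxchg8b against edx:eax on 32-bit),
  // so the expected value must be forced into those fixed registers.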
  if (is_reference_type(type)) {
    cmp_value.load_item_force(FrameMap::rax_oop_opr);
    new_value.load_item();
    __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_INT) {
    cmp_value.load_item_force(FrameMap::rax_opr);
    new_value.load_item();
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_LONG) {
    cmp_value.load_item_force(FrameMap::long0_opr);
    new_value.load_item_force(FrameMap::long1_opr);
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else {
    Unimplemented();
  }
  LIR_Opr result = new_register(T_INT);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, T_INT);
  return result;
}

LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  bool is_oop = is_reference_type(type);
  LIR_Opr result = new_register(type);
  value.load_item();
  // Because we want a 2-arg form of xchg and xadd
  __ move(value.result(), result);
  assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
  __ xchg(addr, result, result, LIR_OprFact::illegalOpr);
  return result;
}

LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  value.load_item();
  // Because we want a 2-arg form of xchg and xadd
  __ move(value.result(), result);
  assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
  __ xadd(addr, result, result, LIR_OprFact::illegalOpr);
  return result;
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  assert(UseFMA, "Needs FMA instructions support.");
  LIRItem value(x->argument_at(0), this);
  LIRItem value1(x->argument_at(1), this);
  LIRItem value2(x->argument_at(2), this);

  value2.set_destroys_register();

  value.load_item();
  value1.load_item();
  value2.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_input1 = value1.result();
  LIR_Opr calc_input2 = value2.result();
  LIR_Opr calc_result = rlock_result(x);

  switch (x->id()) {
  case vmIntrinsics::_fmaD:   __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
  case vmIntrinsics::_fmaF:   __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
  default:                    ShouldNotReachHere();
  }
}


void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow), "wrong type");

  if (x->id() == vmIntrinsics::_dexp || x->id() == vmIntrinsics::_dlog ||
      x->id() == vmIntrinsics::_dpow || x->id() == vmIntrinsics::_dcos ||
      x->id() == vmIntrinsics::_dsin || x->id() == vmIntrinsics::_dtan ||
      x->id() == vmIntrinsics::_dlog10) {
    do_LibmIntrinsic(x);
    return;
  }

  LIRItem value(x->argument_at(0), this);

  bool use_fpu = false;
#ifndef _LP64
  if (UseSSE < 2) {
    value.set_destroys_register();
  }
#endif // !LP64
  value.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_result = rlock_result(x);

  LIR_Opr tmp = LIR_OprFact::illegalOpr;
#ifdef _LP64
  if (UseAVX > 2 && (!VM_Version::supports_avx512vl()) &&
      (x->id() == vmIntrinsics::_dabs)) {
    tmp = new_register(T_DOUBLE);
    __ move(LIR_OprFact::doubleConst(-0.0), tmp);
  }
#endif
  if (x->id() == vmIntrinsics::_floatToFloat16) {
    tmp = new_register(T_FLOAT);
    __ move(LIR_OprFact::floatConst(-0.0), tmp);
  }

  switch (x->id()) {
    case vmIntrinsics::_dabs:
      __ abs(calc_input, calc_result, tmp);
      break;
    case vmIntrinsics::_dsqrt:
    case vmIntrinsics::_dsqrt_strict:
      __ sqrt(calc_input, calc_result, LIR_OprFact::illegalOpr);
      break;
    case vmIntrinsics::_floatToFloat16:
      __ f2hf(calc_input, calc_result, tmp);
      break;
    case vmIntrinsics::_float16ToFloat:
      __ hf2f(calc_input, calc_result, LIR_OprFact::illegalOpr);
      break;
    default:
      ShouldNotReachHere();
  }

  if (use_fpu) {
    __ move(calc_result, x->operand());
  }
}

void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
  LIRItem value(x->argument_at(0), this);
  value.set_destroys_register();

  LIR_Opr calc_result = rlock_result(x);
  LIR_Opr result_reg = result_register_for(x->type());

  CallingConvention* cc = nullptr;

  if (x->id() == vmIntrinsics::_dpow) {
    LIRItem value1(x->argument_at(1), this);

    value1.set_destroys_register();

    BasicTypeList signature(2);
    signature.append(T_DOUBLE);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
    value1.load_item_force(cc->at(1));
  } else {
    BasicTypeList signature(1);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
  }

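  // Call the generated libm stub when available; otherwise fall back to the
  // SharedRuntime C implementation of the same math function.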
#ifndef _LP64
  LIR_Opr tmp = FrameMap::fpu0_double_opr;
  result_reg = tmp;
  switch (x->id()) {
    case vmIntrinsics::_dexp:
      if (StubRoutines::dexp() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog:
      if (StubRoutines::dlog() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog10:
      if (StubRoutines::dlog10() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dpow:
      if (StubRoutines::dpow() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dsin:
      if (VM_Version::supports_sse2() && StubRoutines::dsin() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dcos:
      if (VM_Version::supports_sse2() && StubRoutines::dcos() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dtan:
      if (StubRoutines::dtan() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
      }
      break;
    default:  ShouldNotReachHere();
  }
#else
  switch (x->id()) {
    case vmIntrinsics::_dexp:
      if (StubRoutines::dexp() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog:
      if (StubRoutines::dlog() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog10:
      if (StubRoutines::dlog10() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dpow:
      if (StubRoutines::dpow() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dsin:
      if (StubRoutines::dsin() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dcos:
      if (StubRoutines::dcos() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dtan:
      if (StubRoutines::dtan() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
      }
      break;
    default:  ShouldNotReachHere();
  }
#endif // _LP64
  __ move(result_reg, calc_result);
}

void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

#ifndef _LP64
  src.load_item_force     (FrameMap::rcx_oop_opr);
  src_pos.load_item_force (FrameMap::rdx_opr);
  dst.load_item_force     (FrameMap::rax_oop_opr);
  dst_pos.load_item_force (FrameMap::rbx_opr);
  length.load_item_force  (FrameMap::rdi_opr);
  LIR_Opr tmp =           FrameMap::rsi_opr;
#else

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp =           FrameMap::as_opr(j_rarg5);
#endif // _LP64

  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}

void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // val is destroyed by update_crc32
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();

#ifndef _LP64
      if (!is_updateBytes) { // 'buf' is a raw long address; truncate it to 32 bits
        base_op = new_register(T_INT);
        __ convert(Bytecodes::_l2i, buf.result(), base_op);
      }
#else
      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
      }
#endif

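      // Form base + offset (+ scaled index) and materialize the raw buffer
      // address with leal, since the stub takes a T_ADDRESS argument.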
      LIR_Address* a = new LIR_Address(base_op,
                                       index,
                                       offset,
                                       T_BYTE);
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr addr = new_pointer_register();
      __ leal(LIR_OprFact::address(a), addr);

      crc.load_item_force(cc->at(0));
      __ move(addr, cc->at(1));
      len.load_item_force(cc->at(2));

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
      __ move(result_reg, result);

      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  Unimplemented();
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  assert(UseVectorizedMismatchIntrinsic, "need AVX instruction support");

  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);

  LIRItem a(x->argument_at(0), this); // Object
  LIRItem aOffset(x->argument_at(1), this); // long
  LIRItem b(x->argument_at(2), this); // Object
  LIRItem bOffset(x->argument_at(3), this); // long
  LIRItem length(x->argument_at(4), this); // int
  LIRItem log2ArrayIndexScale(x->argument_at(5), this); // int

  a.load_item();
  aOffset.load_nonconstant();
  b.load_item();
  bOffset.load_nonconstant();

  long constant_aOffset = 0;
  LIR_Opr result_aOffset = aOffset.result();
  if (result_aOffset->is_constant()) {
    constant_aOffset = result_aOffset->as_jlong();
    result_aOffset = LIR_OprFact::illegalOpr;
  }
  LIR_Opr result_a = a.result();

  long constant_bOffset = 0;
  LIR_Opr result_bOffset = bOffset.result();
  if (result_bOffset->is_constant()) {
    constant_bOffset = result_bOffset->as_jlong();
    result_bOffset = LIR_OprFact::illegalOpr;
  }
  LIR_Opr result_b = b.result();

#ifndef _LP64
  result_a = new_register(T_INT);
  __ convert(Bytecodes::_l2i, a.result(), result_a);
  result_b = new_register(T_INT);
  __ convert(Bytecodes::_l2i, b.result(), result_b);
#endif

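  // Compute the raw base + offset address of each array region; the stub
  // receives both as T_ADDRESS arguments.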
  LIR_Address* addr_a = new LIR_Address(result_a,
                                        result_aOffset,
                                        constant_aOffset,
                                        T_BYTE);

  LIR_Address* addr_b = new LIR_Address(result_b,
                                        result_bOffset,
                                        constant_bOffset,
                                        T_BYTE);

  BasicTypeList signature(4);
  signature.append(T_ADDRESS);
  signature.append(T_ADDRESS);
  signature.append(T_INT);
  signature.append(T_INT);
  CallingConvention* cc = frame_map()->c_calling_convention(&signature);
  const LIR_Opr result_reg = result_register_for(x->type());

  LIR_Opr ptr_addr_a = new_pointer_register();
  __ leal(LIR_OprFact::address(addr_a), ptr_addr_a);

  LIR_Opr ptr_addr_b = new_pointer_register();
  __ leal(LIR_OprFact::address(addr_b), ptr_addr_b);

  __ move(ptr_addr_a, cc->at(0));
  __ move(ptr_addr_b, cc->at(1));
  length.load_item_force(cc->at(2));
  log2ArrayIndexScale.load_item_force(cc->at(3));

  __ call_runtime_leaf(StubRoutines::vectorizedMismatch(), getThreadTemp(), result_reg, cc->args());
  __ move(result_reg, result);
}

// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
LIR_Opr fixed_register_for(BasicType type) {
  switch (type) {
    case T_FLOAT:  return FrameMap::fpu0_float_opr;
    case T_DOUBLE: return FrameMap::fpu0_double_opr;
    case T_INT:    return FrameMap::rax_opr;
    case T_LONG:   return FrameMap::long0_opr;
    default:       ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }
}

void LIRGenerator::do_Convert(Convert* x) {
#ifdef _LP64
  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);
  __ convert(x->op(), input, result);
  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
#else
  // flags that vary for the different operations and different SSE-settings
  bool fixed_input = false, fixed_result = false, round_result = false, needs_stub = false;

  switch (x->op()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = false; break;

    case Bytecodes::_f2d: fixed_input = UseSSE == 1; fixed_result = false;       round_result = false;      needs_stub = false; break;
    case Bytecodes::_d2f: fixed_input = false;       fixed_result = UseSSE == 1; round_result = UseSSE < 1; needs_stub = false; break;
    case Bytecodes::_i2f: fixed_input = false;       fixed_result = false;       round_result = UseSSE < 1; needs_stub = false; break;
    case Bytecodes::_i2d: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = false; break;
    case Bytecodes::_f2i: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = true;  break;
    case Bytecodes::_d2i: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = true;  break;
    case Bytecodes::_l2f: fixed_input = false;       fixed_result = UseSSE >= 1; round_result = UseSSE < 1; needs_stub = false; break;
    case Bytecodes::_l2d: fixed_input = false;       fixed_result = UseSSE >= 2; round_result = UseSSE < 2; needs_stub = false; break;
    case Bytecodes::_f2l: fixed_input = true;        fixed_result = true;        round_result = false;      needs_stub = false; break;
    case Bytecodes::_d2l: fixed_input = true;        fixed_result = true;        round_result = false;      needs_stub = false; break;
    default: ShouldNotReachHere();
  }

  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);

  // arguments of lir_convert
  LIR_Opr conv_input = input;
  LIR_Opr conv_result = result;
  ConversionStub* stub = nullptr;

  if (fixed_input) {
    conv_input = fixed_register_for(input->type());
    __ move(input, conv_input);
  }

  assert(fixed_result == false || round_result == false, "cannot set both");
  if (fixed_result) {
    conv_result = fixed_register_for(result->type());
  } else if (round_result) {
    result = new_register(result->type());
    set_vreg_flag(result, must_start_in_memory);
  }

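  // f2i/d2i need a stub: cvttss2si/cvttsd2si produce the integer-indefinite
  // value 0x80000000 on NaN or overflow, which the ConversionStub fixes up
  // to match Java's narrowing semantics.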
1303   if (needs_stub) {
1304     stub = new ConversionStub(x->op(), conv_input, conv_result);
1305   }
1306 
1307   __ convert(x->op(), conv_input, conv_result, stub);
1308 
1309   if (result != conv_result) {
1310     __ move(conv_result, result);
1311   }
1312 
1313   assert(result->is_virtual(), "result must be virtual register");
1314   set_result(x, result);
1315 #endif // _LP64
1316 }
1317 
1318 
1319 void LIRGenerator::do_NewInstance(NewInstance* x) {
1320   print_if_not_loaded(x);
1321 
1322   CodeEmitInfo* info = state_for(x, x->needs_state_before() ? x->state_before() : x->state());
1323   LIR_Opr reg = result_register_for(x->type());
1324   new_instance(reg, x->klass(), x->is_unresolved(),
1325                !x->is_unresolved() && x->klass()->is_inlinetype(),
1326                FrameMap::rcx_oop_opr,
1327                FrameMap::rdi_oop_opr,
1328                FrameMap::rsi_oop_opr,
1329                LIR_OprFact::illegalOpr,
1330                FrameMap::rdx_metadata_opr, info);
1331   LIR_Opr result = rlock_result(x);
1332   __ move(reg, result);
1333 }
1334 
1335 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1336   CodeEmitInfo* info = state_for(x, x->state());
1337 
1338   LIRItem length(x->length(), this);
1339   length.load_item_force(FrameMap::rbx_opr);
1340 
1341   LIR_Opr reg = result_register_for(x->type());
1342   LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
1343   LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
1344   LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
1345   LIR_Opr tmp4 = reg;
1346   LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
1347   LIR_Opr len = length.result();
1348   BasicType elem_type = x->elt_type();
1349 
1350   __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1351 
1352   CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
1353   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path, false);
1354 
1355   LIR_Opr result = rlock_result(x);
1356   __ move(reg, result);
1357 }
1358 
1359 
1360 void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
1361   LIRItem length(x->length(), this);
1362   // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1363   // and therefore provide the state before the parameters have been consumed
1364   CodeEmitInfo* patching_info = nullptr;
1365   if (!x->klass()->is_loaded() || PatchALot) {
1366     patching_info =  state_for(x, x->state_before());
1367   }
1368 
1369   CodeEmitInfo* info = state_for(x, x->state());
1370 
1371   const LIR_Opr reg = result_register_for(x->type());
1372   LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
1373   LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
1374   LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
1375   LIR_Opr tmp4 = reg;
1376   LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
1377 
1378   length.load_item_force(FrameMap::rbx_opr);
1379   LIR_Opr len = length.result();
1380 
1381   ciKlass* obj = (ciKlass*) x->exact_type();
1382   CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, x->is_null_free());
1383   if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1384     BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1385   }
1386   klass2reg_with_patching(klass_reg, obj, patching_info);
1387   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path, x->is_null_free());
1388 
1389   LIR_Opr result = rlock_result(x);
1390   __ move(reg, result);
1391 }
1392 
1393 
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, nullptr);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = nullptr;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_nonconstant();

    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }

  LIR_Opr klass_reg = FrameMap::rax_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::rbx_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::rcx_opr;
  __ move(FrameMap::rsp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}


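// Lowering of "checkcast". Besides the plain bytecode this also covers two
// synthetic variants that can never require patching: the incompatible
// class change check (which throws ICCE) and the invokespecial receiver
// check (which deoptimizes instead of throwing).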
void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = nullptr;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  if (x->is_null_free()) {
    __ null_check(obj.result(), new CodeEmitInfo(info_for_exception));
  }

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == nullptr, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == nullptr, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci(), x->is_null_free());
}


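// Lowering of "instanceof". Structurally the same as checkcast above, but
// it produces an int result instead of throwing, so no exception state is
// needed.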
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = nullptr;
  if (!x->klass()->is_loaded() || PatchALot) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}


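// Lowering of a two-way branch. Note the x86-specific constraints handled
// below: long compares directly support only eq/ne/lt/ge (other conditions
// are obtained by mirroring the condition and swapping the operands), and
// long/float/double operands cannot be compared against an arbitrary
// constant on the right-hand side.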
void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();
  if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
    // inline long zero
    yin->dont_load_item();
  } else if (tag == longTag || tag == floatTag || tag == doubleTag || x->substitutability_check()) {
    // longs, floats and doubles (and substitutability checks) cannot have a constant on the right-hand side
    yin->load_item();
  } else {
    yin->dont_load_item();
  }

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  set_no_result(x);

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
        x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  if (x->substitutability_check()) {
    substitutability_check(x, *xin, *yin);
  } else {
    __ cmp(lir_cond(cond), left, right);
  }
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}


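// On 64-bit x86 the current JavaThread* lives permanently in r15, so it
// can be used directly; on 32-bit a get_thread sequence must materialize
// it into a fresh register.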
LIR_Opr LIRGenerator::getThreadPointer() {
#ifdef _LP64
  return FrameMap::as_pointer_opr(r15_thread);
#else
  LIR_Opr result = new_register(T_INT);
  __ get_thread(result);
  return result;
#endif // _LP64
}

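// Debug tracing hook (used by the LIRTraceExecution develop flag, going by
// the shared LIRGenerator code): passes the block id to a leaf runtime
// call through the outgoing stack area.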
void LIRGenerator::trace_block_entry(BlockBegin* block) {
  store_stack_parameter(LIR_OprFact::intConst(block->block_id()), in_ByteSize(0));
  LIR_OprList* args = new LIR_OprList();
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
}


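// The Java memory model requires volatile 64-bit accesses to be atomic,
// which a pair of 32-bit moves on x86_32 would not be. Both helpers below
// achieve atomicity for T_LONG by routing the value through an FP/SSE
// register, which can move all 64 bits in one instruction.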
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  if (address->type() == T_LONG) {
    address = new LIR_Address(address->base(),
                              address->index(), address->scale(),
                              address->disp(), T_DOUBLE);
    // Transfer the value atomically by using FP moves.  This means
    // the value has to be moved between CPU and FPU registers.  It
    // always has to be moved through a spill slot since there's no
    // quick way to pack the value into an SSE register.
    LIR_Opr temp_double = new_register(T_DOUBLE);
    LIR_Opr spill = new_register(T_LONG);
    set_vreg_flag(spill, must_start_in_memory);
    __ move(value, spill);
    __ volatile_move(spill, temp_double, T_LONG);
    __ volatile_move(temp_double, LIR_OprFact::address(address), T_LONG, info);
  } else {
    __ store(value, address, info);
  }
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  if (address->type() == T_LONG) {
    address = new LIR_Address(address->base(),
                              address->index(), address->scale(),
                              address->disp(), T_DOUBLE);
    // Transfer the value atomically by using FP moves.  This means
    // the value has to be moved between CPU and FPU registers.  In
    // SSE0 and SSE1 mode it has to be moved through a spill slot but in
    // SSE2+ mode it can be moved directly.
    LIR_Opr temp_double = new_register(T_DOUBLE);
    __ volatile_move(LIR_OprFact::address(address), temp_double, T_LONG, info);
    __ volatile_move(temp_double, result, T_LONG);
#ifndef _LP64
    if (UseSSE < 2) {
      // below SSE2 there is no direct xmm->cpu register move, so the
      // result must start in memory (in SSE2+ mode no spill slot is needed)
      set_vreg_flag(result, must_start_in_memory);
    }
#endif // !LP64
  } else {
    __ load(address, result, info);
  }
}