1 /*
   2  * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4  * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "asm/macroAssembler.inline.hpp"
  29 #include "c1/c1_Compilation.hpp"
  30 #include "c1/c1_FrameMap.hpp"
  31 #include "c1/c1_Instruction.hpp"
  32 #include "c1/c1_LIRAssembler.hpp"
  33 #include "c1/c1_LIRGenerator.hpp"
  34 #include "c1/c1_Runtime1.hpp"
  35 #include "c1/c1_ValueStack.hpp"
  36 #include "ci/ciArray.hpp"
  37 #include "ci/ciObjArrayKlass.hpp"
  38 #include "ci/ciTypeArrayKlass.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/stubRoutines.hpp"
  41 #include "utilities/powerOfTwo.hpp"
  42 #include "vmreg_riscv.inline.hpp"
  43 
  44 #ifdef ASSERT
  45 #define __ gen()->lir(__FILE__, __LINE__)->
  46 #else
  47 #define __ gen()->lir()->
  48 #endif
  49 
// Item will be loaded into a byte register; Intel only.
// RISC-V has no dedicated byte registers, so a plain load is sufficient.
void LIRItem::load_byte_item() {
  load_item();
}
  54 
  55 
  56 void LIRItem::load_nonconstant() {
  57   LIR_Opr r = value()->operand();
  58   if (r->is_constant()) {
  59     _result = r;
  60   } else {
  61     load_item();
  62   }
  63 }
  64 
  65 //--------------------------------------------------------------
  66 //               LIRGenerator
  67 //--------------------------------------------------------------
  68 
  69 
// Fixed operands used by exception handling: the exception oop and pc
// travel in r10 and r13 respectively.
LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::r10_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::r13_opr; }
// Fixed div/rem/shift registers are an x86 concept; unused on RISC-V.
LIR_Opr LIRGenerator::divInOpr()        { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::divOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::remOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { Unimplemented(); return LIR_OprFact::illegalOpr; }
// Lock slots for synchronization; the lock itself lives in a fresh vreg.
LIR_Opr LIRGenerator::syncLockOpr()     { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::r10_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }
  79 
  80 
  81 LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  82   LIR_Opr opr;
  83   switch (type->tag()) {
  84     case intTag:     opr = FrameMap::r10_opr;          break;
  85     case objectTag:  opr = FrameMap::r10_oop_opr;      break;
  86     case longTag:    opr = FrameMap::long10_opr;       break;
  87     case floatTag:   opr = FrameMap::fpu10_float_opr;  break;
  88     case doubleTag:  opr = FrameMap::fpu10_double_opr; break;
  89 
  90     case addressTag: // fall through
  91     default:
  92       ShouldNotReachHere();
  93       return LIR_OprFact::illegalOpr;
  94   }
  95 
  96   assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  97   return opr;
  98 }
  99 
 100 
 101 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
 102   LIR_Opr reg = new_register(T_INT);
 103   set_vreg_flag(reg, LIRGenerator::byte_reg);
 104   return reg;
 105 }
 106 
 107 //--------- loading items into registers --------------------------------
 108 
 109 
// Returns true when the value can be stored to memory directly as a
// constant, i.e. when it is zero (for floats: a bit-pattern of all zeros).
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (v->type()->as_IntConstant() != NULL) {
    return v->type()->as_IntConstant()->value() == 0;
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    // Only the null object can be stored as a constant.
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else if (v->type()->as_FloatConstant() != NULL) {
    // Compares the raw bits (as jint) against zero: only +0.0f passes,
    // -0.0f has the sign bit set and is rejected.
    return jint_cast(v->type()->as_FloatConstant()->value()) == 0.0f;
  } else if (v->type()->as_DoubleConstant() != NULL) {
    // Same bit-pattern check for doubles: +0.0 only, -0.0 rejected.
    return jlong_cast(v->type()->as_DoubleConstant()->value()) == 0.0;
  }
  return false;
}
 124 
 125 bool LIRGenerator::can_inline_as_constant(Value v) const {
 126   if (v->type()->as_IntConstant() != NULL) {
 127     int value = v->type()->as_IntConstant()->value();
 128     // "-value" must be defined for value may be used for sub
 129     return Assembler::operand_valid_for_add_immediate(value) &&
 130            Assembler::operand_valid_for_add_immediate(- value);
 131   } else if (v->type()->as_ObjectConstant() != NULL) {
 132     return v->type()->as_ObjectConstant()->value()->is_null_object();
 133   } else if (v->type()->as_LongConstant() != NULL) {
 134     long value = v->type()->as_LongConstant()->value();
 135     // "-value" must be defined for value may be used for sub
 136     return Assembler::operand_valid_for_add_immediate(value) &&
 137            Assembler::operand_valid_for_add_immediate(- value);
 138   } else if (v->type()->as_FloatConstant() != NULL) {
 139     return v->type()->as_FloatConstant()->value() == 0.0f;
 140   } else if (v->type()->as_DoubleConstant() != NULL) {
 141     return v->type()->as_DoubleConstant()->value() == 0.0;
 142   }
 143   return false;
 144 }
 145 
 146 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
 147   if (c->as_constant() != NULL) {
 148     long constant = 0;
 149     switch (c->type()) {
 150       case T_INT:  constant = c->as_jint();   break;
 151       case T_LONG: constant = c->as_jlong();  break;
 152       default:     return false;
 153     }
 154     // "-constant" must be defined for c may be used for sub
 155     return Assembler::operand_valid_for_add_immediate(constant) &&
 156            Assembler::operand_valid_for_add_immediate(- constant);
 157   }
 158   return false;
 159 }
 160 
// No dedicated register is reserved for safepoint polls on RISC-V.
LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}
 164 
// Build an addressing mode for base + (index << shift) + disp.
// A constant index is folded into the displacement when the folded value
// fits in 32 bits; otherwise the constant is materialized into a fresh
// T_LONG register and used as a register index.
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");

  if (index->is_constant()) {
    LIR_Const *constant = index->as_constant_ptr();
    jlong c;
    if (constant->type() == T_INT) {
      // widen to jlong before shifting to avoid 32-bit overflow
      c = (jlong(index->as_jint()) << shift) + disp;
    } else {
      assert(constant->type() == T_LONG, "should be");
      c = (index->as_jlong() << shift) + disp;
    }
    // Fold into the displacement only if the value round-trips through jint.
    if ((jlong)((jint)c) == c) {
      return new LIR_Address(base, (jint)c, type);
    } else {
      LIR_Opr tmp = new_register(T_LONG);
      __ move(index, tmp);
      return new LIR_Address(base, tmp, type);
    }
  }

  // Register index: scale and displacement are applied by the address itself.
  return new LIR_Address(base, index, (LIR_Address::Scale)shift, disp, type);
}
 189 
 190 LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
 191                                               BasicType type) {
 192   int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
 193   int elem_size = type2aelembytes(type);
 194   int shift = exact_log2(elem_size);
 195   return generate_address(array_opr, index_opr, shift, offset_in_bytes, type);
 196 }
 197 
 198 LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
 199   LIR_Opr r;
 200   switch (type) {
 201     case T_LONG:
 202       r = LIR_OprFact::longConst(x);
 203       break;
 204     case T_INT:
 205       r = LIR_OprFact::intConst(x);
 206       break;
 207     default:
 208       ShouldNotReachHere();
 209   }
 210   return r;
 211 }
 212 
 213 void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
 214   LIR_Opr pointer = new_pointer_register();
 215   __ move(LIR_OprFact::intptrConst(counter), pointer);
 216   LIR_Address* addr = new LIR_Address(pointer, type);
 217   increment_counter(addr, step);
 218 }
 219 
 220 void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
 221   LIR_Opr reg = new_register(addr->type());
 222   __ load(addr, reg);
 223   __ add(reg, load_immediate(step, addr->type()), reg);
 224   __ store(reg, addr);
 225 }
 226 
 227 void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
 228   LIR_Opr reg = new_register(T_INT);
 229   __ load(generate_address(base, disp, T_INT), reg, info);
 230   __ cmp(condition, reg, LIR_OprFact::intConst(c));
 231 }
 232 
 233 void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
 234   LIR_Opr reg1 = new_register(T_INT);
 235   __ load(generate_address(base, disp, type), reg1, info);
 236   __ cmp(condition, reg, reg1);
 237 }
 238 
 239 bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
 240   if (tmp->is_valid() && c > 0 && c < max_jint) {
 241     if (is_power_of_2(c - 1)) {
 242       __ shift_left(left, exact_log2(c - 1), tmp);
 243       __ add(tmp, left, result);
 244       return true;
 245     } else if (is_power_of_2(c + 1)) {
 246       __ shift_left(left, exact_log2(c + 1), tmp);
 247       __ sub(tmp, left, result);
 248       return true;
 249     }
 250   }
 251   return false;
 252 }
 253 
 254 void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
 255   BasicType type = item->type();
 256   __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
 257 }
 258 
 259 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info,
 260                                      ciMethod* profiled_method, int profiled_bci) {
 261     LIR_Opr tmp1 = new_register(objectType);
 262     LIR_Opr tmp2 = new_register(objectType);
 263     LIR_Opr tmp3 = new_register(objectType);
 264     __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
 265 }
 266 
 267 //----------------------------------------------------------------------
 268 //             visitor functions
 269 //----------------------------------------------------------------------
 270 
// Emit lock acquisition for a MonitorEnter node.
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);

  // State for the implicit null check of the locked object, if needed.
  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), LIR_OprFact::illegalOpr,
                x->monitor_no(), info_for_exception, info);
}
 291 
// Emit lock release for a MonitorExit node.
void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(), "");

  LIRItem obj(x->obj(), this);
  // The object is deliberately not loaded here; monitor_exit is handed a
  // fresh temp instead — presumably it rematerializes the object itself
  // (TODO confirm against the shared monitor_exit implementation).
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}
 303 
 304 // neg
 305 void LIRGenerator::do_NegateOp(NegateOp* x) {
 306   LIRItem from(x->x(), this);
 307   from.load_item();
 308   LIR_Opr result = rlock_result(x);
 309   __ negate(from.result(), result);
 310 }
 311 
// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {

    // float remainder is implemented as a direct call into the runtime
    BasicTypeList signature(2);
    if (x->op() == Bytecodes::_frem) {
      signature.append(T_FLOAT);
      signature.append(T_FLOAT);
    } else {
      signature.append(T_DOUBLE);
      signature.append(T_DOUBLE);
    }
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    const LIR_Opr result_reg = result_register_for(x->type());

    // Move the dividend into the first outgoing C argument and force the
    // divisor directly into the second.
    left.load_item();
    __ move(left.result(), cc->at(0));
    right.load_item_force(cc->at(1));

    address entry;
    if (x->op() == Bytecodes::_frem) {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
    } else {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
    }

    // Call the leaf runtime routine, then copy the fixed result register
    // into the node's virtual result.
    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    return;
  }

  // All other FP arithmetic is emitted inline with both operands in registers.
  if (!left.is_register()) {
    left.load_item();
  }
  // Always load right hand side.
  right.load_item();

  LIR_Opr reg = rlock(x);
  arithmetic_op_fpu(x->op(), reg, left.result(), right.result());

  set_result(x, round_item(reg));
}
 362 
// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {

  // missing test if instr is commutative and if we should swap
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {

    left.load_item();

    bool need_zero_check = true;
    if (right.is_constant()) {
      jlong c = right.get_jlong_constant();
      // no need to do div-by-zero check if the divisor is a non-zero constant
      if (c != 0) { need_zero_check = false; }
      // do not load right if the divisor is a power-of-2 constant
      if (c > 0 && is_power_of_2(c)) {
        right.dont_load_item();
      } else {
        right.load_item();
      }
    } else {
      right.load_item();
    }
    if (need_zero_check) {
      // Explicit zero check so dividing by zero raises ArithmeticException
      // via the DivByZeroStub.
      CodeEmitInfo* info = state_for(x);
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(info));
    }

    rlock_result(x);
    switch (x->op()) {
      case Bytecodes::_lrem:
        __ rem(left.result(), right.result(), x->operand());
        break;
      case Bytecodes::_ldiv:
        __ div(left.result(), right.result(), x->operand());
        break;
      default:
        ShouldNotReachHere();
    }
  } else {
    assert(x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub,
           "expect lmul, ladd or lsub");
    // add, sub, mul
    left.load_item();
    if (!right.is_register()) {
      // Load the right operand unless it is an add/sub constant that fits
      // the add-immediate encoding ("-c" is checked for lsub since the
      // subtraction may be emitted as an add of the negation).
      if (x->op() == Bytecodes::_lmul ||
          !right.is_constant() ||
          (x->op() == Bytecodes::_ladd &&
          !Assembler::operand_valid_for_add_immediate(right.get_jlong_constant())) ||
          (x->op() == Bytecodes::_lsub &&
          !Assembler::operand_valid_for_add_immediate(-right.get_jlong_constant()))) {
            right.load_item();
      } else { // add, sub
        assert(x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expected ladd or lsub");
        // don't load constants to save register
        right.load_nonconstant();
      }
    }
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}
 428 
// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {

  // Test if instr is commutative and if we should swap
  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;
  if (x->is_commutative() && left.is_stack() && right.is_register()) {
    // swap them if left is real stack (or cached) and right is real register(not cached)
    left_arg = &right;
    right_arg = &left;
  }
  left_arg->load_item();
  // do not need to load right, as we can handle stack and constants
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {

    rlock_result(x);

    bool need_zero_check = true;
    if (right.is_constant()) {
      jint c = right.get_jint_constant();
      // no need to do div-by-zero check if the divisor is a non-zero constant
      if (c != 0) { need_zero_check = false; }
      // do not load right if the divisor is a power-of-2 constant
      if (c > 0 && is_power_of_2(c)) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
    } else {
      right_arg->load_item();
    }
    if (need_zero_check) {
      // Explicit zero check so dividing by zero raises ArithmeticException
      // via the DivByZeroStub.
      CodeEmitInfo* info = state_for(x);
      __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(info));
    }

    LIR_Opr ill = LIR_OprFact::illegalOpr;
    if (x->op() == Bytecodes::_irem) {
      __ irem(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
    }

  } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) {
    // "-c" is checked for isub since the subtraction may be emitted as an
    // add of the negation.
    if (right.is_constant() &&
        ((x->op() == Bytecodes::_iadd && !Assembler::operand_valid_for_add_immediate(right.get_jint_constant())) ||
         (x->op() == Bytecodes::_isub && !Assembler::operand_valid_for_add_immediate(-right.get_jint_constant())))) {
      right.load_nonconstant();
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), LIR_OprFact::illegalOpr);
  } else {
    assert (x->op() == Bytecodes::_imul, "expect imul");
    if (right.is_constant()) {
      jint c = right.get_jint_constant();
      // Constants amenable to shift/strength reduction (2^k, 2^k +/- 1)
      // stay unloaded so the backend can use them directly.
      if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) {
        right_arg->dont_load_item();
      } else {
        // Cannot use constant op.
        right_arg->load_item();
      }
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), new_register(T_INT));
  }
}
 502 
 503 void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
 504   // when an operand with use count 1 is the left operand, then it is
 505   // likely that no move for 2-operand-LIR-form is necessary
 506   if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
 507     x->swap_operands();
 508   }
 509 
 510   ValueTag tag = x->type()->tag();
 511   assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
 512   switch (tag) {
 513     case floatTag:
 514     case doubleTag:  do_ArithmeticOp_FPU(x);  return;
 515     case longTag:    do_ArithmeticOp_Long(x); return;
 516     case intTag:     do_ArithmeticOp_Int(x);  return;
 517     default:         ShouldNotReachHere();    return;
 518   }
 519 }
 520 
 521 // _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
 522 void LIRGenerator::do_ShiftOp(ShiftOp* x) {
 523   LIRItem value(x->x(), this);
 524   LIRItem count(x->y(), this);
 525 
 526   value.load_item();
 527   if (count.is_constant()) {
 528     assert(count.type()->as_IntConstant() != NULL || count.type()->as_LongConstant() != NULL , "should be");
 529     count.dont_load_item();
 530   } else {
 531     count.load_item();
 532   }
 533 
 534   LIR_Opr res = rlock_result(x);
 535   shift_op(x->op(), res, value.result(), count.result(), LIR_OprFact::illegalOpr);
 536 }
 537 
 538 
 539 // _iand, _land, _ior, _lor, _ixor, _lxor
 540 void LIRGenerator::do_LogicOp(LogicOp* x) {
 541 
 542   LIRItem left(x->x(),  this);
 543   LIRItem right(x->y(), this);
 544 
 545   left.load_item();
 546   rlock_result(x);
 547   ValueTag tag = right.type()->tag();
 548   if (right.is_constant() &&
 549      ((tag == longTag && Assembler::operand_valid_for_add_immediate(right.get_jlong_constant())) ||
 550       (tag == intTag && Assembler::operand_valid_for_add_immediate(right.get_jint_constant()))))  {
 551     right.dont_load_item();
 552   } else {
 553     right.load_item();
 554   }
 555 
 556   switch (x->op()) {
 557     case Bytecodes::_iand:  // fall through
 558     case Bytecodes::_land:
 559       __ logical_and(left.result(), right.result(), x->operand()); break;
 560     case Bytecodes::_ior:   // fall through
 561     case Bytecodes::_lor:
 562       __ logical_or(left.result(), right.result(), x->operand()); break;
 563     case Bytecodes::_ixor:  // fall through
 564     case Bytecodes::_lxor:
 565       __ logical_xor(left.result(), right.result(), x->operand()); break;
 566     default: Unimplemented();
 567   }
 568 }
 569 
// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    // The long compare may clobber its left operand, so request a private
    // copy for it.
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    // The flag selects the result for unordered inputs: true for the
    // "l" variants (fcmpl/dcmpl), which produce -1 on NaN.
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}
 591 
// Emit an atomic compare-and-swap; returns a T_INT operand holding the
// Java boolean "swap succeeded".
LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  new_value.load_item();
  cmp_value.load_item();
  LIR_Opr result = new_register(T_INT);
  if (is_reference_type(type)) {
    __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
  } else if (type == T_INT) {
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_LONG) {
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else {
    ShouldNotReachHere();
  }
  // The cas_* ops leave a status in the fixed register r5 (t0); the xor
  // with 1 converts it into the boolean result. NOTE(review): this relies
  // on the RISC-V LIR_Assembler's cas implementation using r5 — confirm
  // there before changing either side.
  __ logical_xor(FrameMap::r5_opr, LIR_OprFact::intConst(1), result);
  return result;
}
 609 
 610 LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
 611   bool is_oop = is_reference_type(type);
 612   LIR_Opr result = new_register(type);
 613   value.load_item();
 614   assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
 615   LIR_Opr tmp = new_register(T_INT);
 616   __ xchg(addr, value.result(), result, tmp);
 617   return result;
 618 }
 619 
 620 LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
 621   LIR_Opr result = new_register(type);
 622   value.load_item();
 623   assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
 624   LIR_Opr tmp = new_register(T_INT);
 625   __ xadd(addr, value.result(), result, tmp);
 626   return result;
 627 }
 628 
// Dispatch java.lang.Math intrinsics: transcendental functions go through
// runtime/stub calls (do_LibmIntrinsic); abs and sqrt are emitted inline.
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  // Only _dpow takes two arguments; every other supported intrinsic takes one.
  assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow),
         "wrong type");

  switch (x->id()) {
    case vmIntrinsics::_dexp: // fall through
    case vmIntrinsics::_dlog: // fall through
    case vmIntrinsics::_dpow: // fall through
    case vmIntrinsics::_dcos: // fall through
    case vmIntrinsics::_dsin: // fall through
    case vmIntrinsics::_dtan: // fall through
    case vmIntrinsics::_dlog10:
      do_LibmIntrinsic(x);
      break;
    case vmIntrinsics::_dabs: // fall through
    case vmIntrinsics::_dsqrt: // fall through
    case vmIntrinsics::_dsqrt_strict: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);

      switch (x->id()) {
        case vmIntrinsics::_dsqrt: // fall through
        case vmIntrinsics::_dsqrt_strict: {
          __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
          break;
        }
        case vmIntrinsics::_dabs: {
          __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
          break;
        }
        default:
          ShouldNotReachHere();
      }
      break;
    }
    default:
      ShouldNotReachHere();
  }
}
 670 
 671 void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
 672   LIRItem value(x->argument_at(0), this);
 673   value.set_destroys_register();
 674   LIR_Opr calc_result = rlock_result(x);
 675   LIR_Opr result_reg = result_register_for(x->type());
 676   CallingConvention* cc = NULL;
 677   BasicTypeList signature(1);
 678   signature.append(T_DOUBLE);
 679   if (x->id() == vmIntrinsics::_dpow) { signature.append(T_DOUBLE); }
 680   cc = frame_map()->c_calling_convention(&signature);
 681   value.load_item_force(cc->at(0));
 682   if (x->id() == vmIntrinsics::_dpow) {
 683     LIRItem value1(x->argument_at(1), this);
 684     value1.set_destroys_register();
 685     value1.load_item_force(cc->at(1));
 686   }
 687   switch (x->id()) {
 688     case vmIntrinsics::_dexp:
 689       if (StubRoutines::dexp() != NULL) { __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args()); }
 690       else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args()); }
 691       break;
 692     case vmIntrinsics::_dlog:
 693       if (StubRoutines::dlog() != NULL) {  __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args()); }
 694       else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args()); }
 695       break;
 696     case vmIntrinsics::_dlog10:
 697       if (StubRoutines::dlog10() != NULL) { __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args()); }
 698       else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args()); }
 699       break;
 700     case vmIntrinsics::_dsin:
 701       if (StubRoutines::dsin() != NULL) { __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args()); }
 702       else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args()); }
 703       break;
 704     case vmIntrinsics::_dcos:
 705       if (StubRoutines::dcos() != NULL) {  __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args()); }
 706       else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args()); }
 707       break;
 708     case vmIntrinsics::_dtan:
 709       if (StubRoutines::dtan() != NULL) { __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args()); }
 710       else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args()); }
 711       break;
 712     case vmIntrinsics::_dpow:
 713       if (StubRoutines::dpow() != NULL) { __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args()); }
 714       else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args()); }
 715       break;
 716     default:  ShouldNotReachHere();
 717   }
 718   __ move(result_reg, calc_result);
 719 }
 720 
 721 
// System.arraycopy intrinsic: emits the LIR arraycopy op with all five
// operands forced into the fixed Java argument registers.
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp = FrameMap::as_opr(j_rarg5);

  set_no_result(x);

  // Let the shared helper decide which flags/type checks the stub needs.
  int flags;
  ciArrayKlass* expected_type = NULL;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp,
               expected_type, flags, info); // does add_safepoint
}
 763 
// CRC32/CRC32C intrinsics are not implemented by this C1 backend;
// reaching either of these is a bug.
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  ShouldNotReachHere();
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  ShouldNotReachHere();
}
 771 
 772 void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
 773   assert(x->number_of_arguments() == 3, "wrong type");
 774   assert(UseFMA, "Needs FMA instructions support.");
 775   LIRItem value(x->argument_at(0), this);
 776   LIRItem value1(x->argument_at(1), this);
 777   LIRItem value2(x->argument_at(2), this);
 778 
 779   value.load_item();
 780   value1.load_item();
 781   value2.load_item();
 782 
 783   LIR_Opr calc_input = value.result();
 784   LIR_Opr calc_input1 = value1.result();
 785   LIR_Opr calc_input2 = value2.result();
 786   LIR_Opr calc_result = rlock_result(x);
 787 
 788   switch (x->id()) {
 789     case vmIntrinsics::_fmaD:   __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
 790     case vmIntrinsics::_fmaF:   __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
 791     default:                    ShouldNotReachHere();
 792   }
 793 }
 794 
// vectorizedMismatch intrinsic is not supported by C1 on this platform;
// reaching here is a hard error rather than a silent fallback.
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}
 798 
 799 // _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
 800 // _i2b, _i2c, _i2s
 801 void LIRGenerator::do_Convert(Convert* x) {
 802   LIRItem value(x->value(), this);
 803   value.load_item();
 804   LIR_Opr input = value.result();
 805   LIR_Opr result = rlock(x);
 806 
 807   // arguments of lir_convert
 808   LIR_Opr conv_input = input;
 809   LIR_Opr conv_result = result;
 810 
 811   __ convert(x->op(), conv_input, conv_result);
 812 
 813   assert(result->is_virtual(), "result must be virtual register");
 814   set_result(x, result);
 815 }
 816 
// Generate LIR for 'new' of a plain (non-array) instance.  The fixed
// registers passed to new_instance must agree with what the shared
// allocation helper and its slow-path stub expect — presumably scratch
// temps plus the klass register (TODO confirm against new_instance's
// declaration in the shared LIRGenerator).
void LIRGenerator::do_NewInstance(NewInstance* x) {
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  // Debug info for the allocation site (it can deoptimize or throw).
  CodeEmitInfo* info = state_for(x, x->state());
  // Allocation result arrives in the fixed result register for this type.
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               FrameMap::r12_oop_opr,
               FrameMap::r15_oop_opr,
               FrameMap::r14_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::r13_metadata_opr,
               info);
  // Copy out of the fixed register into a virtual register for the result.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
 835 
// Generate LIR for 'newarray' (array of a primitive element type).
void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());

  // Length is forced into a fixed register; the slow-path stub below is
  // constructed with this operand, so both sides agree on its location.
  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::r9_opr);

  // Fixed temps and klass register handed to the shared allocate_array op.
  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r12_oop_opr;
  LIR_Opr tmp2 = FrameMap::r14_oop_opr;
  LIR_Opr tmp3 = FrameMap::r15_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r13_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  // Primitive array klasses always exist, so the klass can be loaded as a
  // constant — no patching needed (contrast with do_NewObjectArray).
  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  // Copy the fixed result register into a virtual register for the result.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
 859 
// Generate LIR for 'anewarray' (array of object references).
void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info =  state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  // Fixed temps and klass register handed to the shared allocate_array op
  // and to the slow-path stub constructed below.
  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r12_oop_opr;
  LIR_Opr tmp2 = FrameMap::r14_oop_opr;
  LIR_Opr tmp3 = FrameMap::r15_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r13_metadata_opr;

  // Length is forced into a fixed register shared with the slow path.
  length.load_item_force(FrameMap::r9_opr);
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  // Wrap the element klass into its array klass; this can fail under
  // memory pressure, in which case compilation bails out.
  ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  // Unlike primitive arrays, the klass may need patching if unloaded.
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  // Copy the fixed result register into a virtual register for the result.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
 892 
 893 
// Generate LIR for 'multianewarray'.  The dimension sizes are spilled to
// the outgoing-args stack area and passed to the runtime varargs-style
// (klass, rank, pointer-to-dims), since the rank is unbounded.
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  // Wrap each dimension Value in a LIRItem first; evaluation is deferred
  // until after the debug states below have been computed.
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  // Store each dimension size into the stack parameter area; the runtime
  // reads them through the varargs pointer set up below.
  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();

    store_stack_parameter(size->result(), in_ByteSize(i * BytesPerInt));
  }

  // Fixed argument registers for the runtime call.
  LIR_Opr klass_reg = FrameMap::r10_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::r9_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  // Varargs pointer: current SP, i.e. the base of the dims just stored.
  LIR_Opr varargs = FrameMap::r12_opr;
  __ move(FrameMap::sp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  // Copy the fixed result register into a virtual register for the result.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
 943 
// Block entry: no per-block LIR is needed on this platform.
void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}
 947 
// Generate LIR for checkcast.  Chooses the right slow path — class-cast
// exception, incompatible-class-change error, or a deoptimization for
// invokespecial receiver checks — then emits the shared checkcast LIR op.
void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() ||
      (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/ ));

  CodeStub* stub = NULL;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr,
                                   info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    // A failing receiver check deoptimizes instead of throwing.
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    // Ordinary failing checkcast throws ClassCastException on the object.
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  // A third temp is only needed when the klass may require patching or
  // when compressed class pointers need an extra register.
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}
 988 
 989 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
 990   LIRItem obj(x->obj(), this);
 991 
 992   // result and test object may not be in same register
 993   LIR_Opr reg = rlock_result(x);
 994   CodeEmitInfo* patching_info = NULL;
 995   if ((!x->klass()->is_loaded() || PatchALot)) {
 996     // must do this before locking the destination register as an oop register
 997     patching_info = state_for(x, x->state_before());
 998   }
 999   obj.load_item();
1000   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1001   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1002     tmp3 = new_register(objectType);
1003   }
1004   __ instanceof(reg, obj.result(), x->klass(),
1005                 new_register(objectType), new_register(objectType), tmp3,
1006                 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
1007 }
1008 
1009 void LIRGenerator::do_If(If* x) {
1010   // If should have two successors
1011   assert(x->number_of_sux() == 2, "inconsistency");
1012   ValueTag tag = x->x()->type()->tag();
1013   bool is_safepoint = x->is_safepoint();
1014 
1015   If::Condition cond = x->cond();
1016 
1017   LIRItem xitem(x->x(), this);
1018   LIRItem yitem(x->y(), this);
1019   LIRItem* xin = &xitem;
1020   LIRItem* yin = &yitem;
1021 
1022   if (tag == longTag) {
1023     // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
1024     // mirror for other conditions
1025     if (cond == If::gtr || cond == If::leq) {
1026       cond = Instruction::mirror(cond);
1027       xin = &yitem;
1028       yin = &xitem;
1029     }
1030     xin->set_destroys_register();
1031   }
1032   xin->load_item();
1033   yin->load_item();
1034 
1035   set_no_result(x);
1036 
1037   LIR_Opr left = xin->result();
1038   LIR_Opr right = yin->result();
1039 
1040   // add safepoint before generating condition code so it can be recomputed
1041   if (x->is_safepoint()) {
1042     // increment backedge counter if needed
1043     increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1044                                              x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1045     __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
1046   }
1047 
1048   // Generate branch profiling. Profiling code doesn't kill flags.
1049   __ cmp(lir_cond(cond), left, right);
1050   profile_branch(x, cond);
1051   move_to_phi(x->state());
1052   if (x->x()->type()->is_float_kind()) {
1053     __ branch(lir_cond(cond), x->tsux(), x->usux());
1054   } else {
1055     __ branch(lir_cond(cond), x->tsux());
1056   }
1057   assert(x->default_sux() == x->fsux(), "wrong destination above");
1058   __ jump(x->default_sux());
1059 }
1060 
// Return the current JavaThread as a pointer-sized operand; on this
// platform the thread lives in the dedicated xthread register.
LIR_Opr LIRGenerator::getThreadPointer() {
   return FrameMap::as_pointer_opr(xthread);
}
1064 
// Debug tracing of block entries is not implemented on this platform.
void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1066 
// Store to a volatile field: delegate to the dedicated volatile-store LIR
// op so the back end can emit whatever memory ordering this needs.
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  __ volatile_store_mem_reg(value, address, info);
}
1071 
// Load from a volatile field: delegate to the dedicated volatile-load LIR
// op, the counterpart of volatile_field_store above.
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  __ volatile_load_mem_reg(address, result, info);
}