/*
 * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_riscv.inline.hpp"

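// The "__" shorthand appends to the current generator's LIR list; in debug
// builds it also records the C++ source location that emitted each LIR op,
// which helps trace generated LIR back to this file.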
#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

// Item will be loaded into a byte register; Intel only
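// RISC-V has no byte-register constraint: every general-purpose register can
// hold a byte value, so a plain load_item() is all that is needed here.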
void LIRItem::load_byte_item() {
  load_item();
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (r->is_constant()) {
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::r10_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::r13_opr; }
LIR_Opr LIRGenerator::divInOpr()        { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::divOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::remOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::syncLockOpr()     { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::r10_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }


LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:     opr = FrameMap::r10_opr;          break;
    case objectTag:  opr = FrameMap::r10_oop_opr;      break;
    case longTag:    opr = FrameMap::long10_opr;       break;
    case floatTag:   opr = FrameMap::fpu10_float_opr;  break;
    case doubleTag:  opr = FrameMap::fpu10_double_opr; break;

    case addressTag: // fall through
    default:
      ShouldNotReachHere();
      return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}

//--------- loading items into registers --------------------------------


bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (v->type()->as_IntConstant() != NULL) {
    return v->type()->as_IntConstant()->value() == 0;
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else if (v->type()->as_FloatConstant() != NULL) {
    return jint_cast(v->type()->as_FloatConstant()->value()) == 0.0f;
  } else if (v->type()->as_DoubleConstant() != NULL) {
    return jlong_cast(v->type()->as_DoubleConstant()->value()) == 0.0;
  }
  return false;
}

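// Note: the following checks are bounded by operand_valid_for_add_immediate();
// RISC-V addi/addiw take a 12-bit sign-extended immediate, so only small
// constants can be folded into an add or sub.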
bool LIRGenerator::can_inline_as_constant(Value v) const {
  if (v->type()->as_IntConstant() != NULL) {
    int value = v->type()->as_IntConstant()->value();
    // "-value" must also be valid, since the value may be used for a sub
    return Assembler::operand_valid_for_add_immediate(value) &&
           Assembler::operand_valid_for_add_immediate(- value);
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else if (v->type()->as_LongConstant() != NULL) {
    long value = v->type()->as_LongConstant()->value();
    // "-value" must also be valid, since the value may be used for a sub
    return Assembler::operand_valid_for_add_immediate(value) &&
           Assembler::operand_valid_for_add_immediate(- value);
  } else if (v->type()->as_FloatConstant() != NULL) {
    return v->type()->as_FloatConstant()->value() == 0.0f;
  } else if (v->type()->as_DoubleConstant() != NULL) {
    return v->type()->as_DoubleConstant()->value() == 0.0;
  }
  return false;
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  if (c->as_constant() != NULL) {
    long constant = 0;
    switch (c->type()) {
      case T_INT:  constant = c->as_jint();   break;
      case T_LONG: constant = c->as_jlong();  break;
      default:     return false;
    }
    // "-constant" must also be valid, since the constant may be used for a sub
    return Assembler::operand_valid_for_add_immediate(constant) &&
           Assembler::operand_valid_for_add_immediate(- constant);
  }
  return false;
}


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}

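// Fold a constant index into the address displacement when the scaled value
// fits the 32-bit displacement field; otherwise materialize the index in a
// temporary register.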
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  if (index->is_constant()) {
    LIR_Const *constant = index->as_constant_ptr();
    jlong c;
    if (constant->type() == T_INT) {
      c = (jlong(index->as_jint()) << shift) + disp;
    } else {
      assert(constant->type() == T_LONG, "should be");
      c = (index->as_jlong() << shift) + disp;
    }
    if ((jlong)((jint)c) == c) {
      return new LIR_Address(base, (jint)c, type);
    } else {
      LIR_Opr tmp = new_register(T_LONG);
      __ move(index, tmp);
      return new LIR_Address(base, tmp, type);
    }
  } else {
    return new LIR_Address(base, index, (LIR_Address::Scale)shift, disp, type);
  }
}

LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
  int elem_size = type2aelembytes(type);
  int shift = log2i_exact(elem_size);

  LIR_Address* addr = NULL;
  if (index_opr->is_constant()) {
    addr = new LIR_Address(array_opr, offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
  } else {
    if (index_opr->type() == T_INT) {
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
    addr = new LIR_Address(array_opr, index_opr, LIR_Address::scale(type), offset_in_bytes, type);
  }
  return addr;
}

LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  switch (type) {
    case T_LONG: return LIR_OprFact::longConst(x);
    case T_INT:  return LIR_OprFact::intConst(x);
    default:     ShouldNotReachHere();
  }
  return NULL;
}

void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}

void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr reg = new_register(addr->type());
  __ load(addr, reg);
  __ add(reg, load_immediate(step, addr->type()), reg);
  __ store(reg, addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr reg = new_register(T_INT);
  __ load(generate_address(base, disp, T_INT), reg, info);
  __ cmp(condition, reg, LIR_OprFact::intConst(c));
}

void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr reg1 = new_register(T_INT);
  __ load(generate_address(base, disp, type), reg1, info);
  __ cmp(condition, reg, reg1);
}

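// Strength-reduce a multiply by c == 2^k + 1 or c == 2^k - 1 into a shift
// plus an add or sub, e.g. x * 9 => (x << 3) + x and x * 7 => (x << 3) - x.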
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
  if (tmp->is_valid() && c > 0 && c < max_jint) {
    if (is_power_of_2(c - 1)) {
      __ shift_left(left, log2i_exact(c - 1), tmp);
      __ add(tmp, left, result);
      return true;
    } else if (is_power_of_2(c + 1)) {
      __ shift_left(left, log2i_exact(c + 1), tmp);
      __ sub(tmp, left, result);
      return true;
    }
  }
  return false;
}

void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
}

void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info,
                                     ciMethod* profiled_method, int profiled_bci) {
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = new_register(objectType);
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}

//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), LIR_OprFact::illegalOpr,
                x->monitor_no(), info_for_exception, info);
}

void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(), "");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}

// neg
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem from(x->x(), this);
  from.load_item();
  LIR_Opr result = rlock_result(x);
  __ negate(from.result(), result);
}

// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {

    // float remainder is implemented as a direct call into the runtime
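    // (RISC-V has no floating-point remainder instruction, so C1 calls
    // SharedRuntime::frem/drem instead.)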
    BasicTypeList signature(2);
    if (x->op() == Bytecodes::_frem) {
      signature.append(T_FLOAT);
      signature.append(T_FLOAT);
    } else {
      signature.append(T_DOUBLE);
      signature.append(T_DOUBLE);
    }
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    const LIR_Opr result_reg = result_register_for(x->type());

    left.load_item();
    __ move(left.result(), cc->at(0));
    right.load_item_force(cc->at(1));

    address entry;
    if (x->op() == Bytecodes::_frem) {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
    } else {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    return;
  }

  if (!left.is_register()) {
    left.load_item();
  }
  // Always load right hand side.
  right.load_item();

  LIR_Opr reg = rlock(x);
  arithmetic_op_fpu(x->op(), reg, left.result(), right.result());

  set_result(x, round_item(reg));
}

// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {

  // missing test if instr is commutative and if we should swap
  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {

    left.load_item();

    bool need_zero_check = true;
    if (right.is_constant()) {
      jlong c = right.get_jlong_constant();
      // no need to do div-by-zero check if the divisor is a non-zero constant
      if (c != 0) { need_zero_check = false; }
      // do not load right if the divisor is a power-of-2 constant
      if (c > 0 && is_power_of_2(c)) {
        right.dont_load_item();
      } else {
        right.load_item();
      }
    } else {
      right.load_item();
    }
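    // RISC-V integer division does not trap on a zero divisor (the quotient
    // is defined as all ones), so an explicit check is emitted to throw the
    // required ArithmeticException via DivByZeroStub.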
    if (need_zero_check) {
      CodeEmitInfo* info = state_for(x);
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(info));
    }

    rlock_result(x);
    switch (x->op()) {
      case Bytecodes::_lrem:
        __ rem(left.result(), right.result(), x->operand());
        break;
      case Bytecodes::_ldiv:
        __ div(left.result(), right.result(), x->operand());
        break;
      default:
        ShouldNotReachHere();
    }
  } else {
    assert(x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub,
           "expect lmul, ladd or lsub");
    // add, sub, mul
    left.load_item();
    if (!right.is_register()) {
      if (x->op() == Bytecodes::_lmul ||
          !right.is_constant() ||
          (x->op() == Bytecodes::_ladd &&
          !Assembler::operand_valid_for_add_immediate(right.get_jlong_constant())) ||
          (x->op() == Bytecodes::_lsub &&
          !Assembler::operand_valid_for_add_immediate(-right.get_jlong_constant()))) {
            right.load_item();
      } else { // add, sub
        assert(x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expected ladd or lsub");
        // don't load constants to save register
        right.load_nonconstant();
      }
    }
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}

// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {

  // Test if instr is commutative and if we should swap
  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;
  if (x->is_commutative() && left.is_stack() && right.is_register()) {
    // swap them if left is real stack (or cached) and right is a real register (not cached)
    left_arg = &right;
    right_arg = &left;
  }
  left_arg->load_item();
  // do not need to load right, as we can handle stack and constants
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {

    rlock_result(x);

    bool need_zero_check = true;
    if (right.is_constant()) {
      jint c = right.get_jint_constant();
      // no need to do div-by-zero check if the divisor is a non-zero constant
      if (c != 0) { need_zero_check = false; }
      // do not load right if the divisor is a power-of-2 constant
      if (c > 0 && is_power_of_2(c)) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
    } else {
      right_arg->load_item();
    }
    if (need_zero_check) {
      CodeEmitInfo* info = state_for(x);
      __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(info));
    }

    LIR_Opr ill = LIR_OprFact::illegalOpr;
    if (x->op() == Bytecodes::_irem) {
      __ irem(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
    }

  } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) {
    if (right.is_constant() &&
        ((x->op() == Bytecodes::_iadd && !Assembler::operand_valid_for_add_immediate(right.get_jint_constant())) ||
         (x->op() == Bytecodes::_isub && !Assembler::operand_valid_for_add_immediate(-right.get_jint_constant())))) {
      right.load_nonconstant();
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), LIR_OprFact::illegalOpr);
  } else {
    assert(x->op() == Bytecodes::_imul, "expect imul");
    if (right.is_constant()) {
      jint c = right.get_jint_constant();
      if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) {
        right_arg->dont_load_item();
      } else {
        // Cannot use constant op.
        right_arg->load_item();
      }
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), new_register(T_INT));
  }
}

void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag:  do_ArithmeticOp_FPU(x);  return;
    case longTag:    do_ArithmeticOp_Long(x); return;
    case intTag:     do_ArithmeticOp_Int(x);  return;
    default:         ShouldNotReachHere();    return;
  }
}

// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);

  value.load_item();
  if (count.is_constant()) {
    assert(count.type()->as_IntConstant() != NULL || count.type()->as_LongConstant() != NULL, "should be");
    count.dont_load_item();
  } else {
    count.load_item();
  }

  LIR_Opr res = rlock_result(x);
  shift_op(x->op(), res, value.result(), count.result(), LIR_OprFact::illegalOpr);
}


// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();
  rlock_result(x);
  ValueTag tag = right.type()->tag();
  if (right.is_constant() &&
      ((tag == longTag && Assembler::operand_valid_for_add_immediate(right.get_jlong_constant())) ||
       (tag == intTag && Assembler::operand_valid_for_add_immediate(right.get_jint_constant())))) {
    right.dont_load_item();
  } else {
    right.load_item();
  }

  switch (x->op()) {
    case Bytecodes::_iand:  // fall through
    case Bytecodes::_land:
      __ logical_and(left.result(), right.result(), x->operand()); break;
    case Bytecodes::_ior:   // fall through
    case Bytecodes::_lor:
      __ logical_or(left.result(), right.result(), x->operand()); break;
    case Bytecodes::_ixor:  // fall through
    case Bytecodes::_lxor:
      __ logical_xor(left.result(), right.result(), x->operand()); break;
    default: Unimplemented();
  }
}

// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}

LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  new_value.load_item();
  cmp_value.load_item();
  LIR_Opr result = new_register(T_INT);
  if (is_reference_type(type)) {
    __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
  } else if (type == T_INT) {
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_LONG) {
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else {
    ShouldNotReachHere();
  }
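  // The cas_* ops above leave their status flag in t0 (r5); xor'ing its low
  // bit with 1 turns that flag into the boolean result expected here.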
  __ logical_xor(FrameMap::r5_opr, LIR_OprFact::intConst(1), result);
  return result;
}

LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  bool is_oop = is_reference_type(type);
  LIR_Opr result = new_register(type);
  value.load_item();
  assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Opr tmp = new_register(T_INT);
  __ xchg(addr, value.result(), result, tmp);
  return result;
}

LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  value.load_item();
  assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Opr tmp = new_register(T_INT);
  __ xadd(addr, value.result(), result, tmp);
  return result;
}

void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow),
         "wrong type");

  switch (x->id()) {
    case vmIntrinsics::_dexp: // fall through
    case vmIntrinsics::_dlog: // fall through
    case vmIntrinsics::_dpow: // fall through
    case vmIntrinsics::_dcos: // fall through
    case vmIntrinsics::_dsin: // fall through
    case vmIntrinsics::_dtan: // fall through
    case vmIntrinsics::_dlog10:
      do_LibmIntrinsic(x);
      break;
    case vmIntrinsics::_dabs: // fall through
    case vmIntrinsics::_dsqrt: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);

      if (x->id() == vmIntrinsics::_dsqrt) {
        __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
      } else { // vmIntrinsics::_dabs
        __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
      }
      break;
    }
    default: ShouldNotReachHere();
  }
}

void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
  LIRItem value(x->argument_at(0), this);
  value.set_destroys_register();
  LIR_Opr calc_result = rlock_result(x);
  LIR_Opr result_reg = result_register_for(x->type());
  CallingConvention* cc = NULL;
  BasicTypeList signature(1);
  signature.append(T_DOUBLE);
  if (x->id() == vmIntrinsics::_dpow) { signature.append(T_DOUBLE); }
  cc = frame_map()->c_calling_convention(&signature);
  value.load_item_force(cc->at(0));
  if (x->id() == vmIntrinsics::_dpow) {
    LIRItem value1(x->argument_at(1), this);
    value1.set_destroys_register();
    value1.load_item_force(cc->at(1));
  }
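  // Use the optimized math stub when one was generated at startup; otherwise
  // fall back to the shared C runtime implementation.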
  switch (x->id()) {
    case vmIntrinsics::_dexp:
      if (StubRoutines::dexp() != NULL) { __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args()); }
      else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args()); }
      break;
    case vmIntrinsics::_dlog:
      if (StubRoutines::dlog() != NULL) { __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args()); }
      else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args()); }
      break;
    case vmIntrinsics::_dlog10:
      if (StubRoutines::dlog10() != NULL) { __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args()); }
      else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args()); }
      break;
    case vmIntrinsics::_dsin:
      if (StubRoutines::dsin() != NULL) { __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args()); }
      else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args()); }
      break;
    case vmIntrinsics::_dcos:
      if (StubRoutines::dcos() != NULL) { __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args()); }
      else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args()); }
      break;
    case vmIntrinsics::_dtan:
      if (StubRoutines::dtan() != NULL) { __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args()); }
      else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args()); }
      break;
    case vmIntrinsics::_dpow:
      if (StubRoutines::dpow() != NULL) { __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args()); }
      else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args()); }
      break;
    default: ShouldNotReachHere();
  }
  __ move(result_reg, calc_result);
}


void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp = FrameMap::as_opr(j_rarg5);

  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type = NULL;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp,
               expected_type, flags, info); // does add_safepoint
}

void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  ShouldNotReachHere();
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  ShouldNotReachHere();
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  assert(UseFMA, "Needs FMA instructions support.");
  LIRItem value(x->argument_at(0), this);
  LIRItem value1(x->argument_at(1), this);
  LIRItem value2(x->argument_at(2), this);

  value.load_item();
  value1.load_item();
  value2.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_input1 = value1.result();
  LIR_Opr calc_input2 = value2.result();
  LIR_Opr calc_result = rlock_result(x);

  switch (x->id()) {
    case vmIntrinsics::_fmaD:   __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
    case vmIntrinsics::_fmaF:   __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
    default:                    ShouldNotReachHere();
  }
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}

// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {
  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);

  // arguments of lir_convert
  LIR_Opr conv_input = input;
  LIR_Opr conv_result = result;

  __ convert(x->op(), conv_input, conv_result);

  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
}

void LIRGenerator::do_NewInstance(NewInstance* x) {
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               FrameMap::r12_oop_opr,
               FrameMap::r15_oop_opr,
               FrameMap::r14_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::r13_metadata_opr,
               info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::r9_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r12_oop_opr;
  LIR_Opr tmp2 = FrameMap::r14_oop_opr;
  LIR_Opr tmp3 = FrameMap::r15_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r13_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r12_oop_opr;
  LIR_Opr tmp2 = FrameMap::r14_oop_opr;
  LIR_Opr tmp3 = FrameMap::r15_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r13_metadata_opr;

  length.load_item_force(FrameMap::r9_opr);
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();

    store_stack_parameter(size->result(), in_ByteSize(i * BytesPerInt));
  }

  LIR_Opr klass_reg = FrameMap::r10_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::r9_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::r12_opr;
  __ move(FrameMap::sp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}

void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() ||
      (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/ ));

  CodeStub* stub = NULL;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr,
                                   info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_If(If* x) {
  // If should have two successors
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();
  yin->load_item();

  set_no_result(x);

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
                                             x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }

  // Generate branch profiling. Profiling code doesn't kill flags.
  __ cmp(lir_cond(cond), left, right);
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}

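// xthread is the register the RISC-V port reserves for the current JavaThread.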
LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(xthread);
}

void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }

void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  __ volatile_store_mem_reg(value, address, info);
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  __ volatile_load_mem_reg(address, result, info);
}