/*
 * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_riscv.inline.hpp"

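// The "__" shorthand appends LIR instructions to the current generator's
// LIR list; in ASSERT builds each appended op also records the C++ source
// location, which helps when tracing generated LIR.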
#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

// Item will be loaded into a byte register; Intel only.
// RISC-V has no dedicated byte registers, so a plain load is sufficient.
void LIRItem::load_byte_item() {
  load_item();
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (r->is_constant()) {
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::r10_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::r13_opr; }
LIR_Opr LIRGenerator::divInOpr()        { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::divOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::remOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::syncLockOpr()     { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::r10_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }


LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:     opr = FrameMap::r10_opr;          break;
    case objectTag:  opr = FrameMap::r10_oop_opr;      break;
    case longTag:    opr = FrameMap::long10_opr;       break;
    case floatTag:   opr = FrameMap::fpu10_float_opr;  break;
    case doubleTag:  opr = FrameMap::fpu10_double_opr; break;

    case addressTag: // fall through
    default:
      ShouldNotReachHere();
      return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}

//--------- loading items into registers --------------------------------


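// Only zero-valued constants can be stored directly: a store can always use
// the hardwired zero register (x0) as its source, while any other constant
// would first have to be materialized into a register.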
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (v->type()->as_IntConstant() != NULL) {
    return v->type()->as_IntConstant()->value() == 0;
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else if (v->type()->as_FloatConstant() != NULL) {
    return jint_cast(v->type()->as_FloatConstant()->value()) == 0.0f;
  } else if (v->type()->as_DoubleConstant() != NULL) {
    return jlong_cast(v->type()->as_DoubleConstant()->value()) == 0.0;
  }
  return false;
}

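// RISC-V add immediates are 12-bit sign-extended values, i.e. in the range
// [-2048, 2047]. The checks below test both "value" and "-value" because the
// constant may later be used in a subtract, and the most negative encodable
// value has no encodable negation (-(-2048) == 2048 does not fit).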
bool LIRGenerator::can_inline_as_constant(Value v) const {
  if (v->type()->as_IntConstant() != NULL) {
    int value = v->type()->as_IntConstant()->value();
    // "-value" must also be encodable, since the value may be used for a sub
    return Assembler::operand_valid_for_add_immediate(value) &&
           Assembler::operand_valid_for_add_immediate(- value);
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else if (v->type()->as_LongConstant() != NULL) {
    long value = v->type()->as_LongConstant()->value();
    // "-value" must also be encodable, since the value may be used for a sub
    return Assembler::operand_valid_for_add_immediate(value) &&
           Assembler::operand_valid_for_add_immediate(- value);
  } else if (v->type()->as_FloatConstant() != NULL) {
    return v->type()->as_FloatConstant()->value() == 0.0f;
  } else if (v->type()->as_DoubleConstant() != NULL) {
    return v->type()->as_DoubleConstant()->value() == 0.0;
  }
  return false;
}

bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  if (c->as_constant() != NULL) {
    long constant = 0;
    switch (c->type()) {
      case T_INT:  constant = c->as_jint();   break;
      case T_LONG: constant = c->as_jlong();  break;
      default:     return false;
    }
    // "-constant" must also be encodable, since c may be used for a sub
    return Assembler::operand_valid_for_add_immediate(constant) &&
           Assembler::operand_valid_for_add_immediate(- constant);
  }
  return false;
}

LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}

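// Build an addressing expression for base + (index << shift) + disp. A
// constant index is folded into the displacement when the folded value fits
// in 32 bits (LIR_Address displacements are jints); otherwise the index is
// materialized into a temporary register.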
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");

  if (index->is_constant()) {
    LIR_Const *constant = index->as_constant_ptr();
    jlong c;
    if (constant->type() == T_INT) {
      c = (jlong(index->as_jint()) << shift) + disp;
    } else {
      assert(constant->type() == T_LONG, "should be");
      c = (index->as_jlong() << shift) + disp;
    }
    if ((jlong)((jint)c) == c) {
      return new LIR_Address(base, (jint)c, type);
    } else {
      LIR_Opr tmp = new_register(T_LONG);
      __ move(index, tmp);
      return new LIR_Address(base, tmp, type);
    }
  }

  return new LIR_Address(base, index, (LIR_Address::Scale)shift, disp, type);
}

LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);
  return generate_address(array_opr, index_opr, shift, offset_in_bytes, type);
}

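// Note: despite the name, no code is emitted here; the immediate is simply
// wrapped as a LIR constant operand and materialized later if needed.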
LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r;
  switch (type) {
    case T_LONG:
      r = LIR_OprFact::longConst(x);
      break;
    case T_INT:
      r = LIR_OprFact::intConst(x);
      break;
    default:
      ShouldNotReachHere();
  }
  return r;
}

void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}

void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr reg = new_register(addr->type());
  __ load(addr, reg);
  __ add(reg, load_immediate(step, addr->type()), reg);
  __ store(reg, addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr reg = new_register(T_INT);
  __ load(generate_address(base, disp, T_INT), reg, info);
  __ cmp(condition, reg, LIR_OprFact::intConst(c));
}

void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr reg1 = new_register(T_INT);
  __ load(generate_address(base, disp, type), reg1, info);
  __ cmp(condition, reg, reg1);
}

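// Strength-reduce a multiplication by constant c into a shift plus an
// add/sub when c - 1 or c + 1 is a power of two, e.g.:
//   x * 9 == (x << 3) + x   (c - 1 == 8)
//   x * 7 == (x << 3) - x   (c + 1 == 8)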
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
  if (tmp->is_valid() && c > 0 && c < max_jint) {
    if (is_power_of_2(c - 1)) {
      __ shift_left(left, exact_log2(c - 1), tmp);
      __ add(tmp, left, result);
      return true;
    } else if (is_power_of_2(c + 1)) {
      __ shift_left(left, exact_log2(c + 1), tmp);
      __ sub(tmp, left, result);
      return true;
    }
  }
  return false;
}

void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
}

void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info,
                                     ciMethod* profiled_method, int profiled_bci) {
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = new_register(objectType);
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}

//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a tmp register for biased locking
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    tmp = new_register(T_INT);
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), tmp,
                x->monitor_no(), info_for_exception, info);
}

void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(), "");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}

// neg
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem from(x->x(), this);
  from.load_item();
  LIR_Opr result = rlock_result(x);
  __ negate(from.result(), result);
}

// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {

    // float remainder is implemented as a direct call into the runtime
    BasicTypeList signature(2);
    if (x->op() == Bytecodes::_frem) {
      signature.append(T_FLOAT);
      signature.append(T_FLOAT);
    } else {
      signature.append(T_DOUBLE);
      signature.append(T_DOUBLE);
    }
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    const LIR_Opr result_reg = result_register_for(x->type());

    left.load_item();
    __ move(left.result(), cc->at(0));
    right.load_item_force(cc->at(1));

    address entry;
    if (x->op() == Bytecodes::_frem) {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
    } else {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    return;
  }

  if (!left.is_register()) {
    left.load_item();
  }
  // Always load right hand side.
  right.load_item();

  LIR_Opr reg = rlock(x);
  arithmetic_op_fpu(x->op(), reg, left.result(), right.result());

  set_result(x, round_item(reg));
}

// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {

  // missing test if instr is commutative and if we should swap
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {

    left.load_item();

    bool need_zero_check = true;
    if (right.is_constant()) {
      jlong c = right.get_jlong_constant();
      // no need to do div-by-zero check if the divisor is a non-zero constant
      if (c != 0) { need_zero_check = false; }
      // do not load right if the divisor is a power-of-2 constant
      if (c > 0 && is_power_of_2(c)) {
        right.dont_load_item();
      } else {
        right.load_item();
      }
    } else {
      right.load_item();
    }
    if (need_zero_check) {
      CodeEmitInfo* info = state_for(x);
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(info));
    }

    rlock_result(x);
    switch (x->op()) {
      case Bytecodes::_lrem:
        __ rem(left.result(), right.result(), x->operand());
        break;
      case Bytecodes::_ldiv:
        __ div(left.result(), right.result(), x->operand());
        break;
      default:
        ShouldNotReachHere();
    }
  } else {
    assert(x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub,
           "expect lmul, ladd or lsub");
    // add, sub, mul
    left.load_item();
    if (!right.is_register()) {
      if (x->op() == Bytecodes::_lmul ||
          !right.is_constant() ||
          (x->op() == Bytecodes::_ladd &&
          !Assembler::operand_valid_for_add_immediate(right.get_jlong_constant())) ||
          (x->op() == Bytecodes::_lsub &&
          !Assembler::operand_valid_for_add_immediate(-right.get_jlong_constant()))) {
            right.load_item();
      } else { // add, sub
        assert(x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expected ladd or lsub");
        // don't load constants to save register
        right.load_nonconstant();
      }
    }
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}

// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {

  // Test if instr is commutative and if we should swap
  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;
  if (x->is_commutative() && left.is_stack() && right.is_register()) {
    // swap them if left is real stack (or cached) and right is a real register (not cached)
    left_arg = &right;
    right_arg = &left;
  }
  left_arg->load_item();
  // do not need to load right, as we can handle stack and constants
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {

    rlock_result(x);

    bool need_zero_check = true;
    if (right.is_constant()) {
      jint c = right.get_jint_constant();
      // no need to do div-by-zero check if the divisor is a non-zero constant
      if (c != 0) { need_zero_check = false; }
      // do not load right if the divisor is a power-of-2 constant
      if (c > 0 && is_power_of_2(c)) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
    } else {
      right_arg->load_item();
    }
    if (need_zero_check) {
      CodeEmitInfo* info = state_for(x);
      __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(info));
    }

    LIR_Opr ill = LIR_OprFact::illegalOpr;
    if (x->op() == Bytecodes::_irem) {
      __ irem(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
    }

  } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) {
    if (right.is_constant() &&
        ((x->op() == Bytecodes::_iadd && !Assembler::operand_valid_for_add_immediate(right.get_jint_constant())) ||
         (x->op() == Bytecodes::_isub && !Assembler::operand_valid_for_add_immediate(-right.get_jint_constant())))) {
      right.load_nonconstant();
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), LIR_OprFact::illegalOpr);
  } else {
    assert(x->op() == Bytecodes::_imul, "expect imul");
    if (right.is_constant()) {
      jint c = right.get_jint_constant();
      if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) {
        right_arg->dont_load_item();
      } else {
        // Cannot use constant op.
        right_arg->load_item();
      }
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), new_register(T_INT));
  }
}

void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, it is likely
  // that no move for the 2-operand LIR form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag:  do_ArithmeticOp_FPU(x);  return;
    case longTag:    do_ArithmeticOp_Long(x); return;
    case intTag:     do_ArithmeticOp_Int(x);  return;
    default:         ShouldNotReachHere();    return;
  }
}

// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);

  value.load_item();
  if (count.is_constant()) {
    assert(count.type()->as_IntConstant() != NULL || count.type()->as_LongConstant() != NULL, "should be");
    count.dont_load_item();
  } else {
    count.load_item();
  }

  LIR_Opr res = rlock_result(x);
  shift_op(x->op(), res, value.result(), count.result(), LIR_OprFact::illegalOpr);
}


// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();
  rlock_result(x);
  ValueTag tag = right.type()->tag();
  if (right.is_constant() &&
     ((tag == longTag && Assembler::operand_valid_for_add_immediate(right.get_jlong_constant())) ||
      (tag == intTag && Assembler::operand_valid_for_add_immediate(right.get_jint_constant())))) {
    right.dont_load_item();
  } else {
    right.load_item();
  }

  switch (x->op()) {
    case Bytecodes::_iand:  // fall through
    case Bytecodes::_land:
      __ logical_and(left.result(), right.result(), x->operand()); break;
    case Bytecodes::_ior:   // fall through
    case Bytecodes::_lor:
      __ logical_or(left.result(), right.result(), x->operand()); break;
    case Bytecodes::_ixor:  // fall through
    case Bytecodes::_lxor:
      __ logical_xor(left.result(), right.result(), x->operand()); break;
    default: Unimplemented();
  }
}

// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}

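// Emit a compare-and-swap; the generated CAS sequence leaves its flag in
// r5 (t0), and the xor below converts that flag into the boolean result
// expected by the caller.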
LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  new_value.load_item();
  cmp_value.load_item();
  LIR_Opr result = new_register(T_INT);
  if (is_reference_type(type)) {
    __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
  } else if (type == T_INT) {
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_LONG) {
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else {
    ShouldNotReachHere();
  }
  __ logical_xor(FrameMap::r5_opr, LIR_OprFact::intConst(1), result);
  return result;
}

LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  bool is_oop = is_reference_type(type);
  LIR_Opr result = new_register(type);
  value.load_item();
  assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Opr tmp = new_register(T_INT);
  __ xchg(addr, value.result(), result, tmp);
  return result;
}

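// Fetch-and-add; the RISC-V A extension provides amoadd.w/amoadd.d, which
// the backend's xadd can map onto directly (no compare-and-swap loop needed).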
LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  value.load_item();
  assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Opr tmp = new_register(T_INT);
  __ xadd(addr, value.result(), result, tmp);
  return result;
}

void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow),
         "wrong type");

  switch (x->id()) {
    case vmIntrinsics::_dexp: // fall through
    case vmIntrinsics::_dlog: // fall through
    case vmIntrinsics::_dpow: // fall through
    case vmIntrinsics::_dcos: // fall through
    case vmIntrinsics::_dsin: // fall through
    case vmIntrinsics::_dtan: // fall through
    case vmIntrinsics::_dlog10:
      do_LibmIntrinsic(x);
      break;
    case vmIntrinsics::_dabs: // fall through
    case vmIntrinsics::_dsqrt: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);

      switch (x->id()) {
        case vmIntrinsics::_dsqrt: {
          __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
          break;
        }
        case vmIntrinsics::_dabs: {
          __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
          break;
        }
        default:
          ShouldNotReachHere();
      }
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
  LIRItem value(x->argument_at(0), this);
  value.set_destroys_register();

  LIR_Opr calc_result = rlock_result(x);
  LIR_Opr result_reg = result_register_for(x->type());

  CallingConvention* cc = NULL;

  if (x->id() == vmIntrinsics::_dpow) {
    LIRItem value1(x->argument_at(1), this);

    value1.set_destroys_register();

    BasicTypeList signature(2);
    signature.append(T_DOUBLE);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
    value1.load_item_force(cc->at(1));
  } else {
    BasicTypeList signature(1);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
  }

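  // Prefer the platform-optimized stub when one was generated at startup;
  // otherwise fall back to the shared runtime's C implementation.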
  switch (x->id()) {
    case vmIntrinsics::_dexp:
      if (StubRoutines::dexp() != NULL) { __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args()); }
      else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args()); }
      break;
    case vmIntrinsics::_dlog:
      if (StubRoutines::dlog() != NULL) { __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args()); }
      else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args()); }
      break;
    case vmIntrinsics::_dlog10:
      if (StubRoutines::dlog10() != NULL) { __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args()); }
      else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args()); }
      break;
    case vmIntrinsics::_dsin:
      if (StubRoutines::dsin() != NULL) { __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args()); }
      else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args()); }
      break;
    case vmIntrinsics::_dcos:
      if (StubRoutines::dcos() != NULL) { __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args()); }
      else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args()); }
      break;
    case vmIntrinsics::_dtan:
      if (StubRoutines::dtan() != NULL) { __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args()); }
      else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args()); }
      break;
    case vmIntrinsics::_dpow:
      if (StubRoutines::dpow() != NULL) { __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args()); }
      else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args()); }
      break;
    default:  ShouldNotReachHere();
  }
  __ move(result_reg, calc_result);
}


void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp = FrameMap::as_opr(j_rarg5);

  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type = NULL;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp,
               expected_type, flags, info); // does add_safepoint
}

void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  ShouldNotReachHere();
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  ShouldNotReachHere();
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  assert(UseFMA, "Needs FMA instructions support.");
  LIRItem value(x->argument_at(0), this);
  LIRItem value1(x->argument_at(1), this);
  LIRItem value2(x->argument_at(2), this);

  value.load_item();
  value1.load_item();
  value2.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_input1 = value1.result();
  LIR_Opr calc_input2 = value2.result();
  LIR_Opr calc_result = rlock_result(x);

  switch (x->id()) {
    case vmIntrinsics::_fmaD:   __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
    case vmIntrinsics::_fmaF:   __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
    default:                    ShouldNotReachHere();
  }
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}

// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {
  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);

  // arguments of lir_convert
  LIR_Opr conv_input = input;
  LIR_Opr conv_result = result;

  __ convert(x->op(), conv_input, conv_result);

  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
}

void LIRGenerator::do_NewInstance(NewInstance* x) {
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               FrameMap::r12_oop_opr,
               FrameMap::r15_oop_opr,
               FrameMap::r14_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::r13_metadata_opr,
               info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::r9_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r12_oop_opr;
  LIR_Opr tmp2 = FrameMap::r14_oop_opr;
  LIR_Opr tmp3 = FrameMap::r15_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r13_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r12_oop_opr;
  LIR_Opr tmp2 = FrameMap::r14_oop_opr;
  LIR_Opr tmp3 = FrameMap::r15_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r13_metadata_opr;

  length.load_item_force(FrameMap::r9_opr);
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();

    store_stack_parameter(size->result(), in_ByteSize(i * BytesPerInt));
  }

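  // The runtime entry expects the klass, the rank, and a pointer to an array
  // of dimension sizes; the sizes were just stored at SP, so SP itself serves
  // as the varargs pointer.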
  LIR_Opr klass_reg = FrameMap::r10_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::r9_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::r12_opr;
  __ move(FrameMap::sp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}

void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() ||
      (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/ ));

  CodeStub* stub = NULL;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr,
                                   info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_If(If* x) {
  // If should have two successors
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();
  yin->load_item();

  set_no_result(x);

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
                                             x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }

  // Generate branch profiling. Profiling code doesn't kill flags.
  __ cmp(lir_cond(cond), left, right);
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}

LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(xthread);
}

void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }

void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  __ volatile_store_mem_reg(value, address, info);
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  __ volatile_load_mem_reg(address, result, info);
}