/*
 * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciInlineKlass.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_aarch64.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

// On x86, an item used as a byte must be loaded into a byte register;
// AArch64 has no such restriction, so an ordinary load suffices.
void LIRItem::load_byte_item() {
  load_item();
}


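// Use the constant operand directly when the value already has one;
// otherwise force the value into a register.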
void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (r->is_constant()) {
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::r0_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::r3_opr; }
LIR_Opr LIRGenerator::divInOpr()        { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::divOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::remOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::syncLockOpr()     { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::r0_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }


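// Fixed register in which a call leaves its result for the given type:
// r0 for integral and reference values, v0 for floats and doubles.
// The callee flag is unused on AArch64.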
LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:     opr = FrameMap::r0_opr;           break;
    case objectTag:  opr = FrameMap::r0_oop_opr;       break;
    case longTag:    opr = FrameMap::long0_opr;        break;
    case floatTag:   opr = FrameMap::fpu0_float_opr;   break;
    case doubleTag:  opr = FrameMap::fpu0_double_opr;  break;

    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


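// All AArch64 general-purpose registers are byte-addressable, so a byte
// result just gets an ordinary T_INT virtual register; the byte_reg flag
// only constrains allocation on x86.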
LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}


void LIRGenerator::init_temps_for_substitutability_check(LIR_Opr& tmp1, LIR_Opr& tmp2) {
  tmp1 = new_register(T_INT);
  tmp2 = LIR_OprFact::illegalOpr;
}


//--------- loading items into registers --------------------------------


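// Only zero (or null) can be stored directly as a constant: a zero store
// can use the zero register as its source, while any other immediate
// would first have to be materialized.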
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (v->type()->as_IntConstant() != nullptr) {
    return v->type()->as_IntConstant()->value() == 0L;
  } else if (v->type()->as_LongConstant() != nullptr) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != nullptr) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}

bool LIRGenerator::can_inline_as_constant(Value v) const {
  // FIXME: Just a guess
  if (v->type()->as_IntConstant() != nullptr) {
    return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_LongConstant() != nullptr) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != nullptr) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { return false; }


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}


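// Build a LIR_Address of the form base + (index << shift) + disp.
// Constant indexes are folded into the displacement; combinations that
// AArch64 addressing modes cannot encode are legalized by summing the
// excess into a temporary register.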
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  intx large_disp = disp;

  // accumulate fixed displacements
  if (index->is_constant()) {
    LIR_Const *constant = index->as_constant_ptr();
    if (constant->type() == T_INT) {
      large_disp += ((intx)index->as_jint()) << shift;
    } else {
      assert(constant->type() == T_LONG, "should be");
      jlong c = index->as_jlong() << shift;
      if ((jlong)((jint)c) == c) {
        large_disp += c;
        index = LIR_OprFact::illegalOpr;
      } else {
        LIR_Opr tmp = new_register(T_LONG);
        __ move(index, tmp);
        index = tmp;
        // apply shift and displacement below
      }
    }
  }

  if (index->is_register()) {
    // apply the shift and accumulate the displacement
    if (shift > 0) {
      // Use long register to avoid overflow when shifting large index values left.
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index, tmp);
      __ shift_left(tmp, shift, tmp);
      index = tmp;
    }
    if (large_disp != 0) {
      LIR_Opr tmp = new_pointer_register();
      if (Assembler::operand_valid_for_add_sub_immediate(large_disp)) {
        __ add(index, LIR_OprFact::intptrConst(large_disp), tmp);
        index = tmp;
      } else {
        __ move(LIR_OprFact::intptrConst(large_disp), tmp);
        __ add(tmp, index, tmp);
        index = tmp;
      }
      large_disp = 0;
    }
  } else if (large_disp != 0 && !Address::offset_ok_for_immed(large_disp, shift)) {
    // index is illegal so replace it with the displacement loaded into a register
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(large_disp), index);
    large_disp = 0;
  }

  // at this point we either have base + index or base + displacement
  if (large_disp == 0 && index->is_register()) {
    return new LIR_Address(base, index, type);
  } else {
    assert(Address::offset_ok_for_immed(large_disp, shift), "failed for large_disp: " INTPTR_FORMAT " and shift %d", large_disp, shift);
    return new LIR_Address(base, large_disp, type);
  }
}

LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);
  return generate_address(array_opr, index_opr, shift, offset_in_bytes, type);
}

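// Materialize an integer constant as a LIR operand, loading it into a
// register when it cannot be encoded as an AArch64 logical immediate.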
LIR_Opr LIRGenerator::load_immediate(jlong x, BasicType type) {
  LIR_Opr r;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(checked_cast<jint>(x));
    if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
      // This is all rather nasty.  We don't know whether our constant
      // is required for a logical or an arithmetic operation, so we
      // don't know what the range of valid values is!
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else {
    ShouldNotReachHere();
  }
  return r;
}


void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr imm;
  switch (addr->type()) {
  case T_INT:
    imm = LIR_OprFact::intConst(step);
    break;
  case T_LONG:
    imm = LIR_OprFact::longConst(step);
    break;
  default:
    ShouldNotReachHere();
  }
  LIR_Opr reg = new_register(addr->type());
  __ load(addr, reg);
  __ add(reg, imm, reg);
  __ store(reg, addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr reg = new_register(T_INT);
  __ load(generate_address(base, disp, T_INT), reg, info);
  __ cmp(condition, reg, LIR_OprFact::intConst(c));
}

void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr reg1 = new_register(T_INT);
  __ load(generate_address(base, disp, type), reg1, info);
  __ cmp(condition, reg, reg1);
}


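// Strength-reduce a multiplication by constant c: when c is 2^n + 1 the
// product is (left << n) + left, when c is 2^n - 1 it is (left << n) - left,
// and c == -1 is a negation. For example, left * 7 becomes (left << 3) - left.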
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
  juint u_value = (juint)c;
  if (is_power_of_2(u_value - 1)) {
    __ shift_left(left, exact_log2(u_value - 1), tmp);
    __ add(tmp, left, result);
    return true;
  } else if (is_power_of_2(u_value + 1)) {
    __ shift_left(left, exact_log2(u_value + 1), tmp);
    __ sub(tmp, left, result);
    return true;
  } else if (c == -1) {
    __ negate(left, result);
    return true;
  }
  return false;
}

void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
}

void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = new_register(objectType);
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}

//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  LIR_Opr scratch = new_register(T_INT);

  CodeEmitInfo* info_for_exception = nullptr;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }

  CodeStub* throw_ie_stub =
      x->maybe_inlinetype() ?
      new SimpleExceptionStub(C1StubId::throw_identity_exception_id, obj.result(), state_for(x)) :
      nullptr;

  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info, throw_ie_stub);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(), "");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  LIR_Opr scratch = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), scratch, x->monitor_no());
}

void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem from(x->x(), this);
  from.load_item();
  LIR_Opr result = rlock_result(x);
  __ negate(from.result(), result);
}

// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
    // float remainder is implemented as a direct call into the runtime
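    // Note the swapped names: "right" wraps the dividend (x->x()) and
    // "left" the divisor (x->y()); the dividend is moved into the first
    // C argument and the divisor is forced into the second.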
    LIRItem right(x->x(), this);
    LIRItem left(x->y(), this);

    BasicTypeList signature(2);
    if (x->op() == Bytecodes::_frem) {
      signature.append(T_FLOAT);
      signature.append(T_FLOAT);
    } else {
      signature.append(T_DOUBLE);
      signature.append(T_DOUBLE);
    }
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(1));
    right.load_item();

    __ move(right.result(), cc->at(0));

    address entry;
    if (x->op() == Bytecodes::_frem) {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
    } else {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    return;
  }

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  // Always load right hand side.
  right.load_item();

  if (!left.is_register()) {
    left.load_item();
  }

  LIR_Opr reg = rlock(x);

  arithmetic_op_fpu(x->op(), reg, left.result(), right.result());

  set_result(x, round_item(reg));
}

// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  // missing test if instr is commutative and if we should swap
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
    left.load_item();
    bool need_zero_check = true;
    if (right.is_constant()) {
      jlong c = right.get_jlong_constant();
      // no need to do div-by-zero check if the divisor is a non-zero constant
      if (c != 0) need_zero_check = false;
      // do not load right if the divisor is a power-of-2 constant
      if (c > 0 && is_power_of_2(c)) {
        right.dont_load_item();
      } else {
        right.load_item();
      }
    } else {
      right.load_item();
    }
    if (need_zero_check) {
      CodeEmitInfo* info = state_for(x);
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(info));
    }

    rlock_result(x);
    switch (x->op()) {
    case Bytecodes::_lrem:
      __ rem(left.result(), right.result(), x->operand());
      break;
    case Bytecodes::_ldiv:
      __ div(left.result(), right.result(), x->operand());
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  } else {
    assert(x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub,
           "expect lmul, ladd or lsub");
    // add, sub, mul
    left.load_item();
    if (!right.is_register()) {
      if (x->op() == Bytecodes::_lmul
          || !right.is_constant()
          || !Assembler::operand_valid_for_add_sub_immediate(right.get_jlong_constant())) {
        right.load_item();
      } else { // add, sub
        assert(x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expect ladd or lsub");
        // don't load constants to save register
        right.load_nonconstant();
      }
    }
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), nullptr);
  }
}

// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  // Test if instr is commutative and if we should swap
  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;
  if (x->is_commutative() && left.is_stack() && right.is_register()) {
    // swap them if left is real stack (or cached) and right is real register (not cached)
    left_arg = &right;
    right_arg = &left;
  }

  left_arg->load_item();

  // do not need to load right, as we can handle stack and constants
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {
    rlock_result(x);
    bool need_zero_check = true;
    if (right.is_constant()) {
      jint c = right.get_jint_constant();
      // no need to do div-by-zero check if the divisor is a non-zero constant
      if (c != 0) need_zero_check = false;
      // do not load right if the divisor is a power-of-2 constant
      if (c > 0 && is_power_of_2(c)) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
    } else {
      right_arg->load_item();
    }
    if (need_zero_check) {
      CodeEmitInfo* info = state_for(x);
      __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(info));
    }

    LIR_Opr ill = LIR_OprFact::illegalOpr;
    if (x->op() == Bytecodes::_irem) {
      __ irem(left_arg->result(), right_arg->result(), x->operand(), ill, nullptr);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left_arg->result(), right_arg->result(), x->operand(), ill, nullptr);
    }
  } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) {
    if (right.is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(right.get_jint_constant())) {
      right.load_nonconstant();
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), LIR_OprFact::illegalOpr);
  } else {
    assert(x->op() == Bytecodes::_imul, "expect imul");
    if (right.is_constant()) {
      jint c = right.get_jint_constant();
      if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) {
        right_arg->dont_load_item();
      } else {
        // Cannot use constant op.
        right_arg->load_item();
      }
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), new_register(T_INT));
  }
}

void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == nullptr && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag:  do_ArithmeticOp_FPU(x);  return;
    case longTag:    do_ArithmeticOp_Long(x); return;
    case intTag:     do_ArithmeticOp_Int(x);  return;
    default:         ShouldNotReachHere();    return;
  }
}

// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
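// Only the low 5 (int) or 6 (long) bits of a shift count are significant:
// constant counts are masked below, and variable counts get an explicit
// AND before the shift is emitted.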
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()) {
    right.dont_load_item();

    switch (x->op()) {
    case Bytecodes::_ishl: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_ishr: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_iushr: {
      int c = right.get_jint_constant() & 0x1f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshl: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshr: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lushr: {
      int c = right.get_jint_constant() & 0x3f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    default:
      ShouldNotReachHere();
    }
  } else {
    right.load_item();
    LIR_Opr tmp = new_register(T_INT);
    switch (x->op()) {
    case Bytecodes::_ishl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_ishr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_iushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    default:
      ShouldNotReachHere();
    }
  }
}

// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()
      && ((right.type()->tag() == intTag
           && Assembler::operand_valid_for_logical_immediate(true, right.get_jint_constant()))
          || (right.type()->tag() == longTag
              && Assembler::operand_valid_for_logical_immediate(false, right.get_jlong_constant())))) {
    right.dont_load_item();
  } else {
    right.load_item();
  }
  switch (x->op()) {
  case Bytecodes::_iand:
  case Bytecodes::_land:
    __ logical_and(left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ior:
  case Bytecodes::_lor:
    __ logical_or (left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ixor:
  case Bytecodes::_lxor:
    __ logical_xor(left.result(), right.result(), x->operand()); break;
  default: Unimplemented();
  }
}

// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}

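// The cas_* macros leave a flag in r8 (rscratch1): 0 on success, 1 on
// failure. XORing with 1 converts this to the 1-on-success value the
// caller expects.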
LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  new_value.load_item();
  cmp_value.load_item();
  LIR_Opr result = new_register(T_INT);
  if (is_reference_type(type)) {
    __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
  } else if (type == T_INT) {
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_LONG) {
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else {
    ShouldNotReachHere();
  }
  __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result);
  return result;
}

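// Atomically replace the value at addr, returning the previous contents.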
LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  bool is_oop = is_reference_type(type);
  LIR_Opr result = new_register(type);
  value.load_item();
  assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Opr tmp = new_register(T_INT);
  __ xchg(addr, value.result(), result, tmp);
  return result;
}

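// Atomically add value to the location at addr, returning the location's
// previous value.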
LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  value.load_item();
  assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Opr tmp = new_register(T_INT);
  __ xadd(addr, value.result(), result, tmp);
  return result;
}

void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow), "wrong type");
  if (x->id() == vmIntrinsics::_dexp || x->id() == vmIntrinsics::_dlog ||
      x->id() == vmIntrinsics::_dpow || x->id() == vmIntrinsics::_dcos ||
      x->id() == vmIntrinsics::_dsin || x->id() == vmIntrinsics::_dtan ||
      x->id() == vmIntrinsics::_dlog10) {
    do_LibmIntrinsic(x);
    return;
  }
  switch (x->id()) {
    case vmIntrinsics::_dabs:
    case vmIntrinsics::_dsqrt:
    case vmIntrinsics::_dsqrt_strict:
    case vmIntrinsics::_floatToFloat16:
    case vmIntrinsics::_float16ToFloat: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr src = value.result();
      LIR_Opr dst = rlock_result(x);

      switch (x->id()) {
        case vmIntrinsics::_dsqrt:
        case vmIntrinsics::_dsqrt_strict: {
          __ sqrt(src, dst, LIR_OprFact::illegalOpr);
          break;
        }
        case vmIntrinsics::_dabs: {
          __ abs(src, dst, LIR_OprFact::illegalOpr);
          break;
        }
        case vmIntrinsics::_floatToFloat16: {
          LIR_Opr tmp = new_register(T_FLOAT);
          __ f2hf(src, dst, tmp);
          break;
        }
        case vmIntrinsics::_float16ToFloat: {
          LIR_Opr tmp = new_register(T_FLOAT);
          __ hf2f(src, dst, tmp);
          break;
        }
        default:
          ShouldNotReachHere();
      }
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

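// Call the optimized math stub when one has been generated for this
// intrinsic, falling back to the SharedRuntime C implementation otherwise.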
void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
  LIRItem value(x->argument_at(0), this);
  value.set_destroys_register();

  LIR_Opr calc_result = rlock_result(x);
  LIR_Opr result_reg = result_register_for(x->type());

  CallingConvention* cc = nullptr;

  if (x->id() == vmIntrinsics::_dpow) {
    LIRItem value1(x->argument_at(1), this);

    value1.set_destroys_register();

    BasicTypeList signature(2);
    signature.append(T_DOUBLE);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
    value1.load_item_force(cc->at(1));
  } else {
    BasicTypeList signature(1);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
  }

  switch (x->id()) {
    case vmIntrinsics::_dexp:
      if (StubRoutines::dexp() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog:
      // Math.log intrinsic is not implemented on AArch64 (see JDK-8210858),
      // but we can still call the shared runtime.
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
      break;
    case vmIntrinsics::_dlog10:
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
      break;
    case vmIntrinsics::_dpow:
      if (StubRoutines::dpow() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dsin:
      if (StubRoutines::dsin() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dcos:
      if (StubRoutines::dcos() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dtan:
      if (StubRoutines::dtan() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
      }
      break;
    default:  ShouldNotReachHere();
  }
  __ move(result_reg, calc_result);
}


void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = nullptr;
  if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
    info = state_for(x, x->state_before());
    info->set_force_reexecute();
  } else {
    info = state_for(x, x->state());
  }

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp =           FrameMap::as_opr(j_rarg5);

  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);
  if (x->check_flag(Instruction::OmitChecksFlag)) {
    flags = 0;
  }

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}

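// CRC32 updates are delegated to the StubRoutines leaf calls: a constant
// offset is folded into the displacement, a variable index is widened to
// 64 bits, and the effective address is materialized with leal before
// the call.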
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "why are we here?");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // val is destroyed by update_crc32
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();

      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
      }

      if (offset) {
        LIR_Opr tmp = new_pointer_register();
        __ add(base_op, LIR_OprFact::intConst(offset), tmp);
        base_op = tmp;
        offset = 0;
      }

      LIR_Address* a = new LIR_Address(base_op,
                                       index,
                                       offset,
                                       T_BYTE);
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr addr = new_pointer_register();
      __ leal(LIR_OprFact::address(a), addr);

      crc.load_item_force(cc->at(0));
      __ move(addr, cc->at(1));
      len.load_item_force(cc->at(2));

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
      __ move(result_reg, result);

      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  assert(UseCRC32CIntrinsics, "why are we here?");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  switch (x->id()) {
    case vmIntrinsics::_updateBytesCRC32C:
    case vmIntrinsics::_updateDirectByteBufferCRC32C: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem end(x->argument_at(3), this);

      buf.load_item();
      off.load_nonconstant();
      end.load_nonconstant();

      // len = end - off
      LIR_Opr len  = end.result();
      LIR_Opr tmpA = new_register(T_INT);
      LIR_Opr tmpB = new_register(T_INT);
      __ move(end.result(), tmpA);
      __ move(off.result(), tmpB);
      __ sub(tmpA, tmpB, tmpA);
      len = tmpA;

      LIR_Opr index = off.result();
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();

      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
      }

      if (offset) {
        LIR_Opr tmp = new_pointer_register();
        __ add(base_op, LIR_OprFact::intConst(offset), tmp);
        base_op = tmp;
        offset = 0;
      }

      LIR_Address* a = new LIR_Address(base_op,
                                       index,
                                       offset,
                                       T_BYTE);
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr addr = new_pointer_register();
      __ leal(LIR_OprFact::address(a), addr);

      crc.load_item_force(cc->at(0));
      __ move(addr, cc->at(1));
      __ move(len, cc->at(2));

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), getThreadTemp(), result_reg, cc->args());
      __ move(result_reg, result);

      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  assert(UseFMA, "Needs FMA instructions support.");
  LIRItem value(x->argument_at(0), this);
  LIRItem value1(x->argument_at(1), this);
  LIRItem value2(x->argument_at(2), this);

  value.load_item();
  value1.load_item();
  value2.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_input1 = value1.result();
  LIR_Opr calc_input2 = value2.result();
  LIR_Opr calc_result = rlock_result(x);

  switch (x->id()) {
  case vmIntrinsics::_fmaD:   __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
  case vmIntrinsics::_fmaF:   __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
  default:                    ShouldNotReachHere();
  }
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}

// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {
  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);

  // arguments of lir_convert
  LIR_Opr conv_input = input;
  LIR_Opr conv_result = result;

  __ convert(x->op(), conv_input, conv_result);

  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
}

void LIRGenerator::do_NewInstance(NewInstance* x) {
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->needs_state_before() ? x->state_before() : x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               /* allow_inline */ false,
               FrameMap::r10_oop_opr,
               FrameMap::r11_oop_opr,
               FrameMap::r4_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::r3_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = nullptr;
  if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
    info = state_for(x, x->state_before());
    info->set_force_reexecute();
  } else {
    info = state_for(x, x->state());
  }

  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::r19_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r10_oop_opr;
  LIR_Opr tmp2 = FrameMap::r11_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path, x->zero_array());

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = nullptr;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r10_oop_opr;
  LIR_Opr tmp2 = FrameMap::r11_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;

  length.load_item_force(FrameMap::r19_opr);
  LIR_Opr len = length.result();

  ciKlass* obj = (ciKlass*) x->exact_type();
  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, x->is_null_free());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }

  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path, true, x->is_null_free());

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


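// The dimension sizes are stored into consecutive 4-byte stack slots;
// new_multi_array_id receives the klass, the rank, and SP as a pointer
// to that block of dimensions.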
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, nullptr);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = nullptr;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();

    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }

  LIR_Opr klass_reg = FrameMap::r0_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::r19_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::r2_opr;
  __ move(FrameMap::sp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(C1StubId::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}

void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = nullptr;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));
  if (x->is_null_free()) {
    __ null_check(obj.result(), new CodeEmitInfo(info_for_exception));
  }

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == nullptr, "can't patch this");
    stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == nullptr, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }

  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci(), x->is_null_free());
}

void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = nullptr;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();

  if (tag == longTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jlong_constant())) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else if (tag == intTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jint_constant())) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else {
    yin->load_item();
  }

  set_no_result(x);

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
        x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }

  if (x->substitutability_check()) {
    substitutability_check(x, *xin, *yin);
  } else {
    __ cmp(lir_cond(cond), left, right);
  }

  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}

LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(rthread);
}

void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }

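// On AArch64, C1 emits volatile stores as releasing stores (STLR) and
// volatile loads as acquiring loads (LDAR); see the JDK-8179954 comment
// below for why an extra leading membar is sometimes needed.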
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  __ volatile_store_mem_reg(value, address, info);
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR.  Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LD;DMB but stores use STLR.  This can happen if C2 compiles
  // the stores in one method and C1 compiles the loads in another.
  if (!CompilerConfig::is_c1_only_no_jvmci()) {
    __ membar();
  }
  __ volatile_load_mem_reg(address, result, info);
}