1 /*
   2  * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2012, 2024 SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "c1/c1_Compilation.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_Runtime1.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "ci/ciArray.hpp"
  35 #include "ci/ciObjArrayKlass.hpp"
  36 #include "ci/ciTypeArrayKlass.hpp"
  37 #include "runtime/sharedRuntime.hpp"
  38 #include "runtime/stubRoutines.hpp"
  39 #include "runtime/vm_version.hpp"
  40 #include "utilities/powerOfTwo.hpp"
  41 #include "vmreg_ppc.inline.hpp"
  42 #include <stdint.h>
  43 
  44 #ifdef ASSERT
  45 #define __ gen()->lir(__FILE__, __LINE__)->
  46 #else
  47 #define __ gen()->lir()->
  48 #endif
  49 
// Load a byte-sized value into a register. On PPC any GPR can be used for
// byte loads, so this simply delegates to the generic load_item().
void LIRItem::load_byte_item() {
  // Byte loads use same registers as other loads.
  load_item();
}
  54 
  55 
  56 void LIRItem::load_nonconstant() {
  57   LIR_Opr r = value()->operand();
  58   if (_gen->can_inline_as_constant(value())) {
  59     if (!r->is_constant()) {
  60       r = LIR_OprFact::value_type(value()->type());
  61     }
  62     _result = r;
  63   } else {
  64     load_item();
  65   }
  66 }
  67 
  68 
  69 //--------------------------------------------------------------
  70 //               LIRGenerator
  71 //--------------------------------------------------------------
  72 
// Fixed registers used by C1 on PPC for exception handling and monitor code.
LIR_Opr LIRGenerator::exceptionOopOpr()              { return FrameMap::R3_oop_opr; } // Exception oop during unwinding.
LIR_Opr LIRGenerator::exceptionPcOpr()               { return FrameMap::R4_opr; }     // Exception pc during unwinding.
LIR_Opr LIRGenerator::syncLockOpr()                  { return FrameMap::R5_opr; }     // Need temp effect for MonitorEnterStub.
LIR_Opr LIRGenerator::syncTempOpr()                  { return FrameMap::R4_oop_opr; } // Need temp effect for MonitorEnterStub.
LIR_Opr LIRGenerator::getThreadTemp()                { return LIR_OprFact::illegalOpr; } // not needed
  78 
  79 LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  80   LIR_Opr opr;
  81   switch (type->tag()) {
  82   case intTag:     opr = FrameMap::R3_opr;         break;
  83   case objectTag:  opr = FrameMap::R3_oop_opr;     break;
  84   case longTag:    opr = FrameMap::R3_long_opr;    break;
  85   case floatTag:   opr = FrameMap::F1_opr;         break;
  86   case doubleTag:  opr = FrameMap::F1_double_opr;  break;
  87 
  88   case addressTag:
  89   default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  90   }
  91 
  92   assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  93   return opr;
  94 }
  95 
// Not used on PPC; callee-saved register locking is never requested here.
LIR_Opr LIRGenerator::rlock_callee_saved(BasicType type) {
  ShouldNotReachHere();
  return LIR_OprFact::illegalOpr;
}
 100 
 101 
// Allocate a register for a byte value. PPC has no special byte registers,
// so a plain int register suffices.
LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  return new_register(T_INT);
}
 105 
 106 
 107 //--------- loading items into registers --------------------------------
 108 
 109 // PPC cannot inline all constants.
 110 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
 111   if (v->type()->as_IntConstant() != nullptr) {
 112     return Assembler::is_simm16(v->type()->as_IntConstant()->value());
 113   } else if (v->type()->as_LongConstant() != nullptr) {
 114     return Assembler::is_simm16(v->type()->as_LongConstant()->value());
 115   } else if (v->type()->as_ObjectConstant() != nullptr) {
 116     return v->type()->as_ObjectConstant()->value()->is_null_object();
 117   } else {
 118     return false;
 119   }
 120 }
 121 
 122 
 123 // Only simm16 constants can be inlined.
// Only simm16 constants can be inlined.
// Inlining as an operand follows the same rules as storing a constant.
bool LIRGenerator::can_inline_as_constant(Value i) const {
  return can_store_as_constant(i, as_BasicType(i->type()));
}
 127 
 128 
 129 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
 130   if (c->type() == T_INT) {
 131     return Assembler::is_simm16(c->as_jint());
 132   }
 133   if (c->type() == T_LONG) {
 134     return Assembler::is_simm16(c->as_jlong());
 135   }
 136   if (c->type() == T_OBJECT) {
 137     return c->as_jobject() == nullptr;
 138   }
 139   return false;
 140 }
 141 
 142 
// Register used for reading the safepoint poll word.
LIR_Opr LIRGenerator::safepoint_poll_register() {
  return new_register(T_INT);
}
 146 
 147 
// Build a LIR_Address for base + (index << shift) + disp.
// PPC memory instructions encode either a register index or a simm16
// displacement, so shifted indexes and oversized displacements must be
// folded into temporary registers first.
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  intx large_disp = disp;

  // Accumulate fixed displacements.
  if (index->is_constant()) {
    LIR_Const *constant = index->as_constant_ptr();
    if (constant->type() == T_LONG) {
      large_disp += constant->as_jlong() << shift;
    } else {
      large_disp += (intx)(constant->as_jint()) << shift;
    }
    // Constant index has been folded into the displacement.
    index = LIR_OprFact::illegalOpr;
  }

  if (index->is_register()) {
    // Apply the shift and accumulate the displacement.
    if (shift > 0) {
      // Use long register to avoid overflow when shifting large index values left.
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index, tmp);
      __ shift_left(tmp, shift, tmp);
      index = tmp;
    }
    if (large_disp != 0) {
      LIR_Opr tmp = new_pointer_register();
      if (Assembler::is_simm16(large_disp)) {
        // Displacement fits an add-immediate; fold it into the index.
        __ add(index, LIR_OprFact::intptrConst(large_disp), tmp);
        index = tmp;
      } else {
        // Too large for an immediate: materialize, then add the index.
        __ move(LIR_OprFact::intptrConst(large_disp), tmp);
        __ add(tmp, index, tmp);
        index = tmp;
      }
      large_disp = 0;
    }
  } else if (!Assembler::is_simm16(large_disp)) {
    // Index is illegal so replace it with the displacement loaded into a register.
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(large_disp), index);
    large_disp = 0;
  }

  // At this point we either have base + index or base + displacement.
  if (large_disp == 0) {
    return new LIR_Address(base, index, type);
  } else {
    assert(Assembler::is_simm16(large_disp), "must be");
    return new LIR_Address(base, large_disp, type);
  }
}
 200 
 201 
// Build a LIR_Address for an array element access:
//   array_opr + base_offset + index * elem_size.
// Constant indexes are folded into the displacement when the result fits
// simm16; otherwise the offset (and a register index, shifted by the
// element size) is accumulated into a fresh pointer register.
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);

  LIR_Opr base_opr;
  intx offset = arrayOopDesc::base_offset_in_bytes(type);

  if (index_opr->is_constant()) {
    intx i = index_opr->as_constant_ptr()->as_jint();
    intx array_offset = i * elem_size;
    if (Assembler::is_simm16(array_offset + offset)) {
      // Whole displacement fits the instruction encoding.
      base_opr = array_opr;
      offset = array_offset + offset;
    } else {
      base_opr = new_pointer_register();
      if (Assembler::is_simm16(array_offset)) {
        __ add(array_opr, LIR_OprFact::intptrConst(array_offset), base_opr);
      } else {
        // Element offset itself exceeds simm16: materialize then add.
        __ move(LIR_OprFact::intptrConst(array_offset), base_opr);
        __ add(base_opr, array_opr, base_opr);
      }
    }
  } else {
#ifdef _LP64
    // Widen a 32-bit index so the shift below cannot lose bits.
    if (index_opr->type() == T_INT) {
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
#endif

    base_opr = new_pointer_register();
    assert (index_opr->is_register(), "Must be register");
    if (shift > 0) {
      __ shift_left(index_opr, shift, base_opr);
      __ add(base_opr, array_opr, base_opr);
    } else {
      __ add(index_opr, array_opr, base_opr);
    }
  }
  // 'offset' is either the full folded displacement (constant index path)
  // or just the array base offset, which always fits simm16.
  return new LIR_Address(base_opr, offset, type);
}
 245 
 246 
 247 LIR_Opr LIRGenerator::load_immediate(jlong x, BasicType type) {
 248   LIR_Opr r;
 249   if (type == T_LONG) {
 250     r = LIR_OprFact::longConst(x);
 251   } else if (type == T_INT) {
 252     r = LIR_OprFact::intConst(checked_cast<jint>(x));
 253   } else {
 254     ShouldNotReachHere();
 255   }
 256   if (!Assembler::is_simm16(x)) {
 257     LIR_Opr tmp = new_register(type);
 258     __ move(r, tmp);
 259     return tmp;
 260   }
 261   return r;
 262 }
 263 
 264 
// Increment the counter at the given absolute address by 'step'.
// The address is first loaded into a pointer register, then the
// address-based overload performs the read-modify-write.
void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}
 271 
 272 
// Non-atomic read-modify-write increment of the counter at 'addr'.
void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr temp = new_register(addr->type());
  __ move(addr, temp);
  __ add(temp, load_immediate(step, addr->type()), temp);
  __ move(temp, addr);
}
 279 
 280 
// Compare the int at [base + disp] against the constant c.
// R0 serves as scratch for the loaded value; 'info' attaches deopt/implicit
// null-check state to the load.
void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr tmp = FrameMap::R0_opr;
  __ load(new LIR_Address(base, disp, T_INT), tmp, info);
  __ cmp(condition, tmp, c);
}
 286 
 287 
// Compare 'reg' against the value of the given type at [base + disp].
// R0 serves as scratch for the loaded value.
void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base,
                               int disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr tmp = FrameMap::R0_opr;
  __ load(new LIR_Address(base, disp, type), tmp, info);
  __ cmp(condition, reg, tmp);
}
 294 
 295 
// Try to replace left * c with shift/add/sub sequences.
// Handles c == 2^n - 1 (shift then subtract), c == 2^n + 1 (shift then add)
// and c == -1 (negate). Returns false when no reduction applies.
// Note: the check order matters; for c == -1 neither u_value+1 (== 0) nor
// u_value-1 is a power of two, so the negate case is reached last.
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
  assert(left != result, "should be different registers");
  // Using unsigned arithmetics to avoid undefined behavior due to integer overflow.
  // The involved operations are not sensitive to signedness.
  juint u_value = (juint)c;
  if (is_power_of_2(u_value + 1)) {
    __ shift_left(left, log2i_exact(u_value + 1), result);
    __ sub(result, left, result);
    return true;
  } else if (is_power_of_2(u_value - 1)) {
    __ shift_left(left, log2i_exact(u_value - 1), result);
    __ add(result, left, result);
    return true;
  } else if (c == -1) {
    __ negate(left, result);
    return true;
  }
  return false;
}
 315 
 316 
// Store an outgoing call parameter at the given offset from SP.
void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  BasicType t = item->type();
  LIR_Opr sp_opr = FrameMap::SP_opr;
  __ move(item, new LIR_Address(sp_opr, in_bytes(offset_from_sp), t));
}
 322 
 323 
 324 //----------------------------------------------------------------------
 325 //             visitor functions
 326 //----------------------------------------------------------------------
 327 
// Emit the dynamic type check for an aastore (value must be assignable to
// the array's element type).
void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  // Following registers are used by slow_subtype_check:
  LIR_Opr tmp1 = FrameMap::R4_opr; // super_klass
  LIR_Opr tmp2 = FrameMap::R5_opr; // sub_klass
  LIR_Opr tmp3 = FrameMap::R6_opr; // temp
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}
 335 
 336 
// Emit LIR for monitorenter. The object is loaded, and fixed registers
// R4-R6 are used so the slow path (MonitorEnterStub) sees them as temps.
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // We use R4+R5 in order to get a temp effect. These regs are used in slow path (MonitorEnterStub).
  LIR_Opr lock    = FrameMap::R5_opr;
  LIR_Opr scratch = FrameMap::R4_opr;
  LIR_Opr hdr     = FrameMap::R6_opr;

  CodeEmitInfo* info_for_exception = nullptr;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }

  // This CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expects object to be unlocked).
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, hdr, scratch, x->monitor_no(), info_for_exception, info);
}
 359 
 360 
// Emit LIR for monitorexit. The object itself need not be loaded; the
// monitor is found via its slot number, with fixed temps for the slow path.
void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  set_no_result(x);
  LIR_Opr lock     = FrameMap::R5_opr;
  LIR_Opr hdr      = FrameMap::R4_opr; // Used for slow path (MonitorExitStub).
  LIR_Opr obj_temp = FrameMap::R6_opr;
  monitor_exit(obj_temp, lock, hdr, LIR_OprFact::illegalOpr, x->monitor_no());
}
 372 
 373 
 374 // _ineg, _lneg, _fneg, _dneg
// _ineg, _lneg, _fneg, _dneg
// Arithmetic negation: load the operand and emit a LIR negate.
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem value(x->x(), this);
  value.load_item();
  LIR_Opr reg = rlock_result(x);
  __ negate(value.result(), reg);
}
 381 
 382 
 383 // for  _fadd, _fmul, _fsub, _fdiv, _frem
 384 //      _dadd, _dmul, _dsub, _ddiv, _drem
// Floating-point arithmetic. add/sub/mul/div map to machine instructions;
// frem/drem have no hardware equivalent on PPC and call into the runtime.
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  switch (x->op()) {
  case Bytecodes::_fadd:
  case Bytecodes::_fmul:
  case Bytecodes::_fsub:
  case Bytecodes::_fdiv:
  case Bytecodes::_dadd:
  case Bytecodes::_dmul:
  case Bytecodes::_dsub:
  case Bytecodes::_ddiv: {
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);
    left.load_item();
    right.load_item();
    rlock_result(x);
    arithmetic_op_fpu(x->op(), x->operand(), left.result(), right.result());
  }
  break;

  case Bytecodes::_frem:
  case Bytecodes::_drem: {
    // Remainder is computed by a runtime call (SharedRuntime::frem/drem).
    address entry = nullptr;
    switch (x->op()) {
    case Bytecodes::_frem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
      break;
    case Bytecodes::_drem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
      break;
    default:
      ShouldNotReachHere();
    }
    LIR_Opr result = call_runtime(x->x(), x->y(), entry, x->type(), nullptr);
    set_result(x, result);
  }
  break;

  default: ShouldNotReachHere();
  }
}
 425 
 426 
 427 // for  _ladd, _lmul, _lsub, _ldiv, _lrem
// for  _ladd, _lmul, _lsub, _ldiv, _lrem
// For ldiv/lrem an explicit divide-by-zero check is emitted (PPC hardware
// does not trap on integer division by zero).
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  bool is_div_rem = x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem;

  LIRItem right(x->y(), this);
  // Missing test if instr is commutative and if we should swap.
  if (right.value()->type()->as_LongConstant() &&
      (x->op() == Bytecodes::_lsub && right.value()->type()->as_LongConstant()->value() == INT16_MIN) ) {
    // Sub is implemented by addi and can't support min_simm16 as constant
    // (negating it overflows the simm16 range).
    right.load_item();
  } else {
    right.load_nonconstant();
  }
  assert(right.is_constant() || right.is_register(), "wrong state of right");

  if (is_div_rem) {
    LIR_Opr divisor = right.result();
    if (divisor->is_register()) {
      // Divisor only known at runtime: emit an explicit zero check.
      CodeEmitInfo* null_check_info = state_for(x);
      __ cmp(lir_cond_equal, divisor, LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(null_check_info));
    } else {
      jlong const_divisor = divisor->as_constant_ptr()->as_jlong();
      if (const_divisor == 0) {
        // Division by a constant zero always throws; emit an unconditional
        // jump to the stub plus a dummy result to keep the LIR well-formed.
        CodeEmitInfo* null_check_info = state_for(x);
        __ jump(new DivByZeroStub(null_check_info));
        rlock_result(x);
        __ move(LIR_OprFact::longConst(0), x->operand()); // dummy
        return;
      }
      if (x->op() == Bytecodes::_lrem && !is_power_of_2(const_divisor) && const_divisor != -1) {
        // Remainder computation would need additional tmp != R0.
        right.load_item();
      }
    }
  }

  LIRItem left(x->x(), this);
  left.load_item();
  rlock_result(x);
  if (is_div_rem) {
    CodeEmitInfo* info = nullptr; // Null check already done above.
    LIR_Opr tmp = FrameMap::R0_opr;
    if (x->op() == Bytecodes::_lrem) {
      __ irem(left.result(), right.result(), x->operand(), tmp, info);
    } else if (x->op() == Bytecodes::_ldiv) {
      __ idiv(left.result(), right.result(), x->operand(), tmp, info);
    }
  } else {
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), nullptr);
  }
}
 479 
 480 
 481 // for: _iadd, _imul, _isub, _idiv, _irem
// for: _iadd, _imul, _isub, _idiv, _irem
// Mirrors do_ArithmeticOp_Long for 32-bit operands, including the explicit
// divide-by-zero check for idiv/irem.
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  bool is_div_rem = x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem;

  LIRItem right(x->y(), this);
  // Missing test if instr is commutative and if we should swap.
  if (right.value()->type()->as_IntConstant() &&
      (x->op() == Bytecodes::_isub && right.value()->type()->as_IntConstant()->value() == INT16_MIN) ) {
    // Sub is implemented by addi and can't support min_simm16 as constant.
    right.load_item();
  } else {
    right.load_nonconstant();
  }
  assert(right.is_constant() || right.is_register(), "wrong state of right");

  if (is_div_rem) {
    LIR_Opr divisor = right.result();
    if (divisor->is_register()) {
      // Divisor only known at runtime: emit an explicit zero check.
      CodeEmitInfo* null_check_info = state_for(x);
      __ cmp(lir_cond_equal, divisor, LIR_OprFact::intConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(null_check_info));
    } else {
      jint const_divisor = divisor->as_constant_ptr()->as_jint();
      if (const_divisor == 0) {
        // Constant zero divisor always throws; dummy result keeps LIR valid.
        CodeEmitInfo* null_check_info = state_for(x);
        __ jump(new DivByZeroStub(null_check_info));
        rlock_result(x);
        __ move(LIR_OprFact::intConst(0), x->operand()); // dummy
        return;
      }
      if (x->op() == Bytecodes::_irem && !is_power_of_2(const_divisor) && const_divisor != -1) {
        // Remainder computation would need additional tmp != R0.
        right.load_item();
      }
    }
  }

  LIRItem left(x->x(), this);
  left.load_item();
  rlock_result(x);
  if (is_div_rem) {
    CodeEmitInfo* info = nullptr; // Null check already done above.
    LIR_Opr tmp = FrameMap::R0_opr;
    if (x->op() == Bytecodes::_irem) {
      __ irem(left.result(), right.result(), x->operand(), tmp, info);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left.result(), right.result(), x->operand(), tmp, info);
    }
  } else {
    arithmetic_op_int(x->op(), x->operand(), left.result(), right.result(), FrameMap::R0_opr);
  }
}
 533 
 534 
 535 void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
 536   ValueTag tag = x->type()->tag();
 537   assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
 538   switch (tag) {
 539     case floatTag:
 540     case doubleTag: do_ArithmeticOp_FPU(x);  return;
 541     case longTag:   do_ArithmeticOp_Long(x); return;
 542     case intTag:    do_ArithmeticOp_Int(x);  return;
 543     default: ShouldNotReachHere();
 544   }
 545 }
 546 
 547 
 548 // _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
 549 void LIRGenerator::do_ShiftOp(ShiftOp* x) {
 550   LIRItem value(x->x(), this);
 551   LIRItem count(x->y(), this);
 552   value.load_item();
 553   LIR_Opr reg = rlock_result(x);
 554   LIR_Opr mcount;
 555   if (count.result()->is_register()) {
 556     mcount = FrameMap::R0_opr;
 557   } else {
 558     mcount = LIR_OprFact::illegalOpr;
 559   }
 560   shift_op(x->op(), reg, value.result(), count.result(), mcount);
 561 }
 562 
 563 
// Decide whether the constant 'type' can be used as an unsigned immediate
// by the logic instruction 'bc' (andi./andis./ori/oris/xori/xoris and the
// rlwinm/rldicl-style masks used for 'and'). Power-of-two-ish patterns for
// _iand/_land correspond to contiguous bit masks encodable by the rotate
// instructions; xor with -1 is special-cased as a 'not'.
inline bool can_handle_logic_op_as_uimm(ValueType *type, Bytecodes::Code bc) {
  jlong int_or_long_const;
  if (type->as_IntConstant()) {
    int_or_long_const = type->as_IntConstant()->value();
  } else if (type->as_LongConstant()) {
    int_or_long_const = type->as_LongConstant()->value();
  } else if (type->as_ObjectConstant()) {
    return type->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }

  // Fits a 16-bit immediate, or a 16-bit immediate shifted left by 16.
  if (Assembler::is_uimm(int_or_long_const, 16)) return true;
  if ((int_or_long_const & 0xFFFF) == 0 &&
      Assembler::is_uimm((jlong)((julong)int_or_long_const >> 16), 16)) return true;

  // see Assembler::andi
  if (bc == Bytecodes::_iand &&
      (is_power_of_2(int_or_long_const+1) ||
       is_power_of_2(int_or_long_const) ||
       is_power_of_2(-int_or_long_const))) return true;
  if (bc == Bytecodes::_land &&
      (is_power_of_2((unsigned long)int_or_long_const+1) ||
       (Assembler::is_uimm(int_or_long_const, 32) && is_power_of_2(int_or_long_const)) ||
       (int_or_long_const != min_jlong && is_power_of_2(-int_or_long_const)))) return true;

  // special case: xor -1
  if ((bc == Bytecodes::_ixor || bc == Bytecodes::_lxor) &&
      int_or_long_const == -1) return true;
  return false;
}
 595 
 596 
 597 // _iand, _land, _ior, _lor, _ixor, _lxor
// _iand, _land, _ior, _lor, _ixor, _lxor
// Bitwise ops: the right operand stays an inlined constant when it can be
// encoded as an unsigned immediate, otherwise it is loaded.
void LIRGenerator::do_LogicOp(LogicOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();

  Value rval = right.value();
  LIR_Opr r = rval->operand();
  ValueType *type = rval->type();
  // Logic instructions use unsigned immediate values.
  if (can_handle_logic_op_as_uimm(type, x->op())) {
    if (!r->is_constant()) {
      // Value has no operand yet: synthesize the constant operand.
      r = LIR_OprFact::value_type(type);
      rval->set_operand(r);
    }
    right.set_result(r);
  } else {
    right.load_item();
  }

  LIR_Opr reg = rlock_result(x);

  logic_op(x->op(), reg, left.result(), right.result());
}
 622 
 623 
 624 // _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
// Three-way comparison producing -1/0/1. For float compares the 'l'
// variants return -1 on unordered, the 'g' variants return 1.
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);
  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}
 640 
 641 
// Emit a compare-and-swap on *addr and return an int operand that is
// 1 on success, 0 on failure. With compressed oops the object CAS needs
// two temps for encoding/decoding.
LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr result = new_register(T_INT);
  LIR_Opr t1 = LIR_OprFact::illegalOpr;
  LIR_Opr t2 = LIR_OprFact::illegalOpr;
  cmp_value.load_item();
  new_value.load_item();

  if (is_reference_type(type)) {
    if (UseCompressedOops) {
      t1 = new_register(T_OBJECT);
      t2 = new_register(T_OBJECT);
    }
    __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else if (type == T_INT) {
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else if (type == T_LONG) {
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else {
    Unimplemented();
  }
  // Materialize the condition set by the CAS as a 0/1 int result.
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, type);
  return result;
}
 666 
 667 
// Atomic exchange: store value at *addr and return the previous contents.
// R0 serves as scratch.
LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  LIR_Opr tmp = FrameMap::R0_opr;

  value.load_item();
  __ xchg(addr, value.result(), result, tmp);
  return result;
}
 676 
 677 
// Atomic fetch-and-add: add value to *addr and return the previous
// contents. R0 serves as scratch.
LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  LIR_Opr tmp = FrameMap::R0_opr;

  value.load_item();
  __ xadd(addr, value.result(), result, tmp);
  return result;
}
 686 
 687 
// Math intrinsics. abs/sqrt and the float16 conversions are emitted as LIR
// instructions; the transcendental functions (sin/cos/tan/log/log10/exp/pow)
// call into SharedRuntime. Unhandled ids fall through and are compiled as
// ordinary calls.
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
    case vmIntrinsics::_dabs: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);
      __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
      break;
    }
    case vmIntrinsics::_floatToFloat16: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);
      // Needs a float temp for the rounding step.
      LIR_Opr tmp = new_register(T_FLOAT);
      __ f2hf(value.result(), dst, tmp);
      break;
    }
    case vmIntrinsics::_float16ToFloat: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);
      __ hf2f(value.result(), dst, LIR_OprFact::illegalOpr);
      break;
    }
    case vmIntrinsics::_dsqrt:
    case vmIntrinsics::_dsqrt_strict: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);
      __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
      break;
    }
    case vmIntrinsics::_dsin:   // fall through
    case vmIntrinsics::_dcos:   // fall through
    case vmIntrinsics::_dtan:   // fall through
    case vmIntrinsics::_dlog:   // fall through
    case vmIntrinsics::_dlog10: // fall through
    case vmIntrinsics::_dexp: {
      assert(x->number_of_arguments() == 1, "wrong type");

      // Select the matching SharedRuntime entry point.
      address runtime_entry = nullptr;
      switch (x->id()) {
        case vmIntrinsics::_dsin:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
          break;
        case vmIntrinsics::_dcos:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
          break;
        case vmIntrinsics::_dtan:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
          break;
        case vmIntrinsics::_dlog:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
          break;
        case vmIntrinsics::_dlog10:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
          break;
        case vmIntrinsics::_dexp:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
          break;
        default:
          ShouldNotReachHere();
      }

      LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), nullptr);
      set_result(x, result);
      break;
    }
    case vmIntrinsics::_dpow: {
      assert(x->number_of_arguments() == 2, "wrong type");
      address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), nullptr);
      set_result(x, result);
      break;
    }
    default:
      break;
  }
}
 771 
 772 
// System.arraycopy intrinsic: (src, srcPos, dst, dstPos, length).
// All arguments are forced into callee-saved registers matching the C
// calling convention so the slow path can call out without shuffling.
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code.
  CodeEmitInfo* info = nullptr;
  if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
    info = state_for(x, x->state_before());
    info->set_force_reexecute();
  } else {
    info = state_for(x, x->state());
  }

  LIRItem src     (x->argument_at(0), this);
  LIRItem src_pos (x->argument_at(1), this);
  LIRItem dst     (x->argument_at(2), this);
  LIRItem dst_pos (x->argument_at(3), this);
  LIRItem length  (x->argument_at(4), this);

  // Load all values in callee_save_registers (C calling convention),
  // as this makes the parameter passing to the fast case simpler.
  src.load_item_force     (FrameMap::R14_oop_opr);
  src_pos.load_item_force (FrameMap::R15_opr);
  dst.load_item_force     (FrameMap::R17_oop_opr);
  dst_pos.load_item_force (FrameMap::R18_opr);
  length.load_item_force  (FrameMap::R19_opr);
  LIR_Opr tmp =            FrameMap::R20_opr;

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);
  if (x->check_flag(Instruction::OmitChecksFlag)) {
    // Caller guarantees all checks have been done; drop them.
    flags = 0;
  }
  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(),
               length.result(), tmp,
               expected_type, flags, info);
  set_no_result(x);
}
 811 
 812 
 813 // _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
 814 // _i2b, _i2c, _i2s
// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {

  // Register conversion.
  LIRItem value(x->value(), this);
  LIR_Opr reg = rlock_result(x);
  value.load_item();
  switch (x->op()) {
    // Float-to-int conversions clobber their source register on PPC,
    // so mark the input as destroyed (USE_KILL).
    case Bytecodes::_f2l:
    case Bytecodes::_d2l:
    case Bytecodes::_f2i:
    case Bytecodes::_d2i: value.set_destroys_register(); break; // USE_KILL
    default: break;
  }
  __ convert(x->op(), value.result(), reg);
}
 830 
 831 
// Emit LIR for 'new'. Allocation uses fixed registers R4-R8 because the
// slow path (NewInstanceStub) expects them; the result arrives in R3.
void LIRGenerator::do_NewInstance(NewInstance* x) {
  // This instruction can be deoptimized in the slow path.
  const LIR_Opr reg = result_register_for(x->type());
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewInstanceStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  new_instance(reg, x->klass(), x->is_unresolved(), tmp1, tmp2, tmp3, tmp4, klass_reg, info);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
 854 
 855 
void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  // Allocate a new array of a primitive element type.
  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* info = nullptr;
  if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
    info = state_for(x, x->state_before());
    info->set_force_reexecute();
  } else {
    info = state_for(x, x->state());
  }

  LIRItem length(x->length(), this);
  length.load_item();

  // Fixed physical registers shared with the slow-path stub.
  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewTypeArrayStub).
  // We use R5 in order to get a temp effect. This reg is used in slow path (NewTypeArrayStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  // Materialize the type-array klass for the element type into klass_reg.
  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path, x->zero_array());

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
 890 
 891 
void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  // Allocate a new array with an object element type.
  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* info = state_for(x, x->state());
  // In case of patching (i.e., object class is not yet loaded),
  // we need to reexecute the instruction and therefore provide
  // the state before the parameters have been consumed.
  CodeEmitInfo* patching_info = nullptr;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  LIRItem length(x->length(), this);
  length.load_item();

  // Fixed physical registers shared with the slow-path stub.
  const LIR_Opr reg = result_register_for(x->type());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewObjectArrayStub).
  // We use R5 in order to get a temp effect. This reg is used in slow path (NewObjectArrayStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciMetadata* obj = ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    // ciObjArrayKlass::make can come back unloaded (out of memory);
    // bail out of the compile rather than emit code for it.
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
 929 
 930 
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  // Allocate a multi-dimensional array by calling the runtime stub
  // (StubId::c1_new_multi_array_id) with (klass, rank, varargs pointer).
  Values* dims = x->dims();
  int i = dims->length();
  // Wrap each dimension value in a LIRItem; they are evaluated later,
  // after the debug-info states have been captured.
  LIRItemList* items = new LIRItemList(i, i, nullptr);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = nullptr;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  // Store each dimension into the reserved varargs area in the frame,
  // as consecutive jints starting at first_available_sp_in_frame.
  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_nonconstant();
    // FrameMap::_reserved_argument_area_size includes the dimensions
    // varargs, because it's initialized to hir()->max_stack() when the
    // FrameMap is created.
    store_stack_parameter(size->result(), in_ByteSize(i*sizeof(jint) + FrameMap::first_available_sp_in_frame));
  }

  const LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path.
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::R5_opr; // Used by slow path.
  __ move(LIR_OprFact::intConst(x->rank()), rank);

  // Pointer to the dimension array written above.
  LIR_Opr varargs = FrameMap::as_pointer_opr(R6); // Used by slow path.
  __ leal(LIR_OprFact::address(new LIR_Address(FrameMap::SP_opr, FrameMap::first_available_sp_in_frame, T_INT)),
          varargs);

  // Note: This instruction can be deoptimized in the slow path.
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  const LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(StubId::c1_new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
 989 
 990 
void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // No per-block LIR needs to be generated on this platform.
  // nothing to do for now
}
 994 
 995 
void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = nullptr;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // Must do this before locking the destination register as
    // an oop register, and before the obj is loaded (so x->obj()->item()
    // is valid for creating a debug info location).
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  CodeStub* stub;
  // Exception state: full state if required, otherwise the state before
  // the cast with exception handlers ignored.
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  // Select the slow-path stub according to the kind of check.
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == nullptr, "can't patch this");
    // Failure throws IncompatibleClassChangeError.
    stub = new SimpleExceptionStub(StubId::c1_throw_incompatible_class_change_error_id,
                                   LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == nullptr, "can't patch this");
    // Receiver check for invokespecial: deoptimize instead of throwing.
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    // Regular checkcast: failure throws ClassCastException.
    stub = new SimpleExceptionStub(StubId::c1_throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  // Following registers are used by slow_subtype_check:
  LIR_Opr tmp1 = FrameMap::R4_oop_opr; // super_klass
  LIR_Opr tmp2 = FrameMap::R5_oop_opr; // sub_klass
  LIR_Opr tmp3 = FrameMap::R6_oop_opr; // temp
  __ checkcast(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}
1032 
1033 
1034 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1035   LIRItem obj(x->obj(), this);
1036   CodeEmitInfo* patching_info = nullptr;
1037   if (!x->klass()->is_loaded() || PatchALot) {
1038     patching_info = state_for(x, x->state_before());
1039   }
1040   // Ensure the result register is not the input register because the
1041   // result is initialized before the patching safepoint.
1042   obj.load_item();
1043   LIR_Opr out_reg = rlock_result(x);
1044   // Following registers are used by slow_subtype_check:
1045   LIR_Opr tmp1 = FrameMap::R4_oop_opr; // super_klass
1046   LIR_Opr tmp2 = FrameMap::R5_oop_opr; // sub_klass
1047   LIR_Opr tmp3 = FrameMap::R6_oop_opr; // temp
1048   __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
1049                 x->direct_compare(), patching_info,
1050                 x->profiled_method(), x->profiled_bci());
1051 }
1052 
1053 
// Intrinsic for Class::isInstance
address LIRGenerator::isInstance_entry() {
  // Entry point of the C1 runtime stub implementing Class.isInstance.
  return Runtime1::entry_for(StubId::c1_is_instance_of_id);
}
1058 
1059 
void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;
  If::Condition cond = x->cond();

  LIR_Opr left = LIR_OprFact::illegalOpr;
  LIR_Opr right = LIR_OprFact::illegalOpr;

  xin->load_item();
  left = xin->result();

  // Keep the right-hand side as a constant operand where the compare
  // can encode it directly instead of loading it into a register.
  if (yin->result()->is_constant() && yin->result()->type() == T_INT &&
      Assembler::is_simm16(yin->result()->as_constant_ptr()->as_jint())) {
    // Inline int constants which are small enough to be immediate operands.
    right = LIR_OprFact::value_type(yin->value()->type());
  } else if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 &&
             (cond == If::eql || cond == If::neq)) {
    // Inline long zero.
    right = LIR_OprFact::value_type(yin->value()->type());
  } else if (tag == objectTag && yin->is_constant() && (yin->get_jobject_constant()->is_null_object())) {
    // Inline object null.
    right = LIR_OprFact::value_type(yin->value()->type());
  } else {
    yin->load_item();
    right = yin->result();
  }
  set_no_result(x);

  // Add safepoint before generating condition code so it can be recomputed.
  if (x->is_safepoint()) {
    // Increment backedge counter if needed.
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
        x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    // Float compares also pass the unordered successor (NaN operands).
    __ branch(lir_cond(cond), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  // Unconditional edge to the false successor.
  __ jump(x->default_sux());
}
1111 
1112 
LIR_Opr LIRGenerator::getThreadPointer() {
  // R16 is the dedicated thread register on this platform.
  return FrameMap::as_pointer_opr(R16_thread);
}
1116 
1117 
1118 void LIRGenerator::trace_block_entry(BlockBegin* block) {
1119   LIR_Opr arg1 = FrameMap::R3_opr; // ARG1
1120   __ move(LIR_OprFact::intConst(block->block_id()), arg1);
1121   LIR_OprList* args = new LIR_OprList(1);
1122   args->append(arg1);
1123   address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
1124   __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
1125 }
1126 
1127 
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
#ifdef _LP64
  // 64-bit: emit a plain store here.
  // NOTE(review): the memory barriers required for volatile store
  // semantics are presumably emitted by the shared caller — confirm.
  __ store(value, address, info);
#else
  // 32-bit is not supported by this port.
  Unimplemented();
//  __ volatile_store_mem_reg(value, address, info);
#endif
}
1137 
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
#ifdef _LP64
  // 64-bit: plain load, followed by an acquire barrier below.
  __ load(address, result, info);
#else
  // 32-bit is not supported by this port.
  Unimplemented();
//  __ volatile_load_mem_reg(address, result, info);
#endif
  // Acquire barrier orders the volatile load before subsequent accesses.
  __ membar_acquire();
}
1148 
1149 
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "or should not be here");
  LIR_Opr result = rlock_result(x);

  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      // Single-value update: CRC32.update(crc, b).
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // Registers destroyed by update_crc32.
      crc.set_destroys_register();
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      // Bulk update over a byte[] (updateBytes, array base offset added)
      // or a raw buffer address (updateByteBuffer, no base offset).
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      // Fold a constant offset into the address displacement.
      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();
      LIR_Address* a = nullptr;

      if (index->is_valid()) {
        // Variable offset: widen to long, then add the displacement.
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
        __ add(index, LIR_OprFact::intptrConst(offset), index);
        a = new LIR_Address(base_op, index, T_BYTE);
      } else {
        a = new LIR_Address(base_op, offset, T_BYTE);
      }

      // Leaf call signature: int updateBytesCRC32(int crc, address buf, int len).
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr arg1 = cc->at(0),
              arg2 = cc->at(1),
              arg3 = cc->at(2);

      crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32 stub doesn't care about high bits.
      __ leal(LIR_OprFact::address(a), arg2);
      len.load_item_force(arg3); // We skip int->long conversion here, because CRC32 stub expects int.

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), LIR_OprFact::illegalOpr, result_reg, cc->args());
      __ move(result_reg, result);
      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}
1220 
1221 void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
1222   assert(UseCRC32CIntrinsics, "or should not be here");
1223   LIR_Opr result = rlock_result(x);
1224 
1225   switch (x->id()) {
1226     case vmIntrinsics::_updateBytesCRC32C:
1227     case vmIntrinsics::_updateDirectByteBufferCRC32C: {
1228       bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);
1229 
1230       LIRItem crc(x->argument_at(0), this);
1231       LIRItem buf(x->argument_at(1), this);
1232       LIRItem off(x->argument_at(2), this);
1233       LIRItem end(x->argument_at(3), this);
1234       buf.load_item();
1235       off.load_nonconstant();
1236       end.load_nonconstant();
1237 
1238       // len = end - off
1239       LIR_Opr len  = end.result();
1240       LIR_Opr tmpA = new_register(T_INT);
1241       LIR_Opr tmpB = new_register(T_INT);
1242       __ move(end.result(), tmpA);
1243       __ move(off.result(), tmpB);
1244       __ sub(tmpA, tmpB, tmpA);
1245       len = tmpA;
1246 
1247       LIR_Opr index = off.result();
1248       int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
1249       if (off.result()->is_constant()) {
1250         index = LIR_OprFact::illegalOpr;
1251         offset += off.result()->as_jint();
1252       }
1253       LIR_Opr base_op = buf.result();
1254       LIR_Address* a = nullptr;
1255 
1256       if (index->is_valid()) {
1257         LIR_Opr tmp = new_register(T_LONG);
1258         __ convert(Bytecodes::_i2l, index, tmp);
1259         index = tmp;
1260         __ add(index, LIR_OprFact::intptrConst(offset), index);
1261         a = new LIR_Address(base_op, index, T_BYTE);
1262       } else {
1263         a = new LIR_Address(base_op, offset, T_BYTE);
1264       }
1265 
1266       BasicTypeList signature(3);
1267       signature.append(T_INT);
1268       signature.append(T_ADDRESS);
1269       signature.append(T_INT);
1270       CallingConvention* cc = frame_map()->c_calling_convention(&signature);
1271       const LIR_Opr result_reg = result_register_for(x->type());
1272 
1273       LIR_Opr arg1 = cc->at(0),
1274               arg2 = cc->at(1),
1275               arg3 = cc->at(2);
1276 
1277       crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32C stub doesn't care about high bits.
1278       __ leal(LIR_OprFact::address(a), arg2);
1279       __ move(len, cc->at(2));   // We skip int->long conversion here, because CRC32C stub expects int.
1280 
1281       __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), LIR_OprFact::illegalOpr, result_reg, cc->args());
1282       __ move(result_reg, result);
1283       break;
1284     }
1285     default: {
1286       ShouldNotReachHere();
1287     }
1288   }
1289 }
1290 
1291 void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
1292   assert(x->number_of_arguments() == 3, "wrong type");
1293   assert(UseFMA, "Needs FMA instructions support.");
1294   LIRItem value(x->argument_at(0), this);
1295   LIRItem value1(x->argument_at(1), this);
1296   LIRItem value2(x->argument_at(2), this);
1297 
1298   value.load_item();
1299   value1.load_item();
1300   value2.load_item();
1301 
1302   LIR_Opr calc_input = value.result();
1303   LIR_Opr calc_input1 = value1.result();
1304   LIR_Opr calc_input2 = value2.result();
1305   LIR_Opr calc_result = rlock_result(x);
1306 
1307   switch (x->id()) {
1308   case vmIntrinsics::_fmaD: __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
1309   case vmIntrinsics::_fmaF: __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
1310   default:                  ShouldNotReachHere();
1311   }
1312 }
1313 
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  // Not implemented on this platform; presumably the intrinsic should
  // have been rejected before LIR generation, so reaching here is fatal.
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}