1 /*
   2  * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "c1/c1_Compilation.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_Runtime1.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "ci/ciArray.hpp"
  35 #include "ci/ciObjArrayKlass.hpp"
  36 #include "ci/ciTypeArrayKlass.hpp"
  37 #include "runtime/sharedRuntime.hpp"
  38 #include "runtime/stubRoutines.hpp"
  39 #include "vmreg_s390.inline.hpp"
  40 #include "utilities/powerOfTwo.hpp"
  41 
  42 #ifdef ASSERT
  43 #define __ gen()->lir(__FILE__, __LINE__)->
  44 #else
  45 #define __ gen()->lir()->
  46 #endif
  47 
// Load a byte-sized item into a register. z/Architecture has no special
// register requirements for byte operands, so this simply delegates.
void LIRItem::load_byte_item() {
  // Byte loads use same registers as other loads.
  load_item();
}
  52 
  53 void LIRItem::load_nonconstant(int bits) {
  54   LIR_Opr r = value()->operand();
  55   if (_gen->can_inline_as_constant(value(), bits)) {
  56     if (!r->is_constant()) {
  57       r = LIR_OprFact::value_type(value()->type());
  58     }
  59     _result = r;
  60   } else {
  61     load_item();
  62   }
  63 }
  64 
  65 //--------------------------------------------------------------
  66 //               LIRGenerator
  67 //--------------------------------------------------------------
  68 
// Fixed registers mandated by the platform's C1 calling/runtime conventions.
LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::as_oop_opr(Z_EXC_OOP); }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::as_opr(Z_EXC_PC); }
// Integer division uses the even/odd pair Z_R10/Z_R11 (see the DSGFR/DSGR
// comments in do_ArithmeticOp_Int/_Long): quotient in Z_R11, remainder in Z_R10.
LIR_Opr LIRGenerator::divInOpr()        { return FrameMap::Z_R11_opr; }
LIR_Opr LIRGenerator::divOutOpr()       { return FrameMap::Z_R11_opr; }
LIR_Opr LIRGenerator::remOutOpr()       { return FrameMap::Z_R10_opr; }
LIR_Opr LIRGenerator::ldivInOpr()       { return FrameMap::Z_R11_long_opr; }
LIR_Opr LIRGenerator::ldivOutOpr()      { return FrameMap::Z_R11_long_opr; }
LIR_Opr LIRGenerator::lremOutOpr()      { return FrameMap::Z_R10_long_opr; }
// Monitor enter/exit scratch registers.
LIR_Opr LIRGenerator::syncLockOpr()     { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::Z_R13_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }
  80 
// Return the fixed register that holds a call result of the given type:
// Z_R2 for integral/oop/long results, Z_F0 for float/double.
// The 'callee' flag is part of the shared LIRGenerator interface and is
// not used on this platform.
LIR_Opr LIRGenerator::result_register_for (ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:    opr = FrameMap::Z_R2_opr;        break;
    case objectTag: opr = FrameMap::Z_R2_oop_opr;    break;
    case longTag:   opr = FrameMap::Z_R2_long_opr;   break;
    case floatTag:  opr = FrameMap::Z_F0_opr;        break;
    case doubleTag: opr = FrameMap::Z_F0_double_opr; break;

    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}
  97 
// Lock a register for a byte value; an ordinary integer register suffices
// on this platform (no dedicated byte registers are required here).
LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  return new_register(T_INT);
}
 101 
 102 //--------- Loading items into registers. --------------------------------
 103 
 104 // z/Architecture cannot inline all constants.
 105 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
 106   if (v->type()->as_IntConstant() != NULL) {
 107     return Immediate::is_simm16(v->type()->as_IntConstant()->value());
 108   } else if (v->type()->as_LongConstant() != NULL) {
 109     return Immediate::is_simm16(v->type()->as_LongConstant()->value());
 110   } else if (v->type()->as_ObjectConstant() != NULL) {
 111     return v->type()->as_ObjectConstant()->value()->is_null_object();
 112   } else {
 113     return false;
 114   }
 115 }
 116 
 117 bool LIRGenerator::can_inline_as_constant(Value i, int bits) const {
 118   if (i->type()->as_IntConstant() != NULL) {
 119     return Assembler::is_simm(i->type()->as_IntConstant()->value(), bits);
 120   } else if (i->type()->as_LongConstant() != NULL) {
 121     return Assembler::is_simm(i->type()->as_LongConstant()->value(), bits);
 122   } else {
 123     return can_store_as_constant(i, as_BasicType(i->type()));
 124   }
 125 }
 126 
 127 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
 128   if (c->type() == T_INT) {
 129     return Immediate::is_simm20(c->as_jint());
 130   } else   if (c->type() == T_LONG) {
 131     return Immediate::is_simm20(c->as_jlong());
 132   }
 133   return false;
 134 }
 135 
// Register used for safepoint polling: a freshly allocated long register.
LIR_Opr LIRGenerator::safepoint_poll_register() {
  return new_register(longType);
}
 139 
// Build a LIR_Address for base + (index << shift) + disp, working around
// the platform's displacement encoding limits (Displacement::is_validDisp).
// May emit LIR to materialize an out-of-range displacement or a scaled index.
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  if (index->is_constant()) {
    // Fold the constant index into the displacement.
    intx large_disp = disp;
    LIR_Const *constant = index->as_constant_ptr();
    if (constant->type() == T_LONG) {
      large_disp += constant->as_jlong() << shift;
    } else {
      large_disp += (intx)(constant->as_jint()) << shift;
    }
    if (Displacement::is_validDisp(large_disp)) {
      return new LIR_Address(base, large_disp, type);
    }
    // Index is illegal so replace it with the displacement loaded into a register.
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(large_disp), index);
    return new LIR_Address(base, index, type);
  } else {
    if (shift > 0) {
      // Pre-scale the index into a temp; the original index must stay intact.
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index, shift, tmp);
      index = tmp;
    }
    return new LIR_Address(base, index, disp, type);
  }
}
 167 
// Build the address of array element 'index_opr' of an array of the given
// element type: array_opr + base_offset + index * element_size.
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    // Constant index: fold everything into the displacement.
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
  } else {
    if (index_opr->type() == T_INT) {
      // Widen a 32-bit index to 64 bits before scaling.
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
    if (shift > 0) {
      // Scale the index in place (it is a fresh temp or the caller's operand).
      __ shift_left(index_opr, shift, index_opr);
    }
    addr = new LIR_Address(array_opr,
                           index_opr,
                           offset_in_bytes, type);
  }
  return addr;
}
 193 
 194 LIR_Opr LIRGenerator::load_immediate(jlong x, BasicType type) {
 195   LIR_Opr r = LIR_OprFact::illegalOpr;
 196   if (type == T_LONG) {
 197     r = LIR_OprFact::longConst(x);
 198   } else if (type == T_INT) {
 199     r = LIR_OprFact::intConst(checked_cast<jint>(x));
 200   } else {
 201     ShouldNotReachHere();
 202   }
 203   return r;
 204 }
 205 
// Increment the counter at the absolute address 'counter' by 'step'.
// The address is first materialized in a pointer register.
void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}
 212 
// Emit a memory increment: *addr += step.
void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  __ add((LIR_Opr)addr, LIR_OprFact::intConst(step), (LIR_Opr)addr);
}
 216 
// Compare a 32-bit memory word at base+disp against the constant 'c'.
// Loads the word into the Z_R1 scratch register first (clobbers Z_R1).
void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr scratch = FrameMap::Z_R1_opr;
  __ load(new LIR_Address(base, disp, T_INT), scratch, info);
  __ cmp(condition, scratch, c);
}
 222 
// Compare a register against a memory operand at base+disp.
void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
}
 226 
// Try to strength-reduce 'left * c' for c == 2^n - 1 or c == 2^n + 1:
//   (2^n - 1) * x == (x << n) - x
//   (2^n + 1) * x == (x << n) + x
// Requires a valid 'tmp' register. Returns true when reduced, false when the
// caller must emit a regular multiply. NOTE: 'left' is clobbered (shifted in
// place) and 'tmp' receives a copy of the original value.
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
  if (tmp->is_valid()) {
    if (is_power_of_2(c + 1)) {
      __ move(left, tmp);
      __ shift_left(left, log2i_exact(c + 1), left);
      __ sub(left, tmp, result);
      return true;
    } else if (is_power_of_2(c - 1)) {
      __ move(left, tmp);
      __ shift_left(left, log2i_exact(c - 1), left);
      __ add(left, tmp, result);
      return true;
    }
  }
  return false;
}
 243 
// Store an outgoing call argument into the stack at SP + offset_from_sp.
void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::Z_SP_opr, in_bytes(offset_from_sp), type));
}
 248 
 249 //----------------------------------------------------------------------
 250 //             visitor functions
 251 //----------------------------------------------------------------------
 252 
// Emit the dynamic type check for an aastore (value must be assignable to the
// array's element type). Two fresh temps are needed; no third temp here.
void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}
 259 
// monitorenter: lock the object. Produces no result value.
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop.
  LIR_Opr lock = new_register(T_INT);

  // Separate CodeEmitInfo for the implicit null check on the object.
  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for (x);
  }
  // This CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked).
  CodeEmitInfo* info = state_for (x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), LIR_OprFact::illegalOpr,
                x->monitor_no(), info_for_exception, info);
}
 280 
// monitorexit: unlock the object. The object itself need not be loaded;
// it is recovered via obj_temp. Produces no result value.
void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}
 292 
// _ineg, _lneg, _fneg, _dneg
// Arithmetic negation: result = -value.
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem value(x->x(), this);
  value.load_item();
  LIR_Opr reg = rlock_result(x);
  __ negate(value.result(), reg);
}
 300 
 301 // for _fadd, _fmul, _fsub, _fdiv, _frem
 302 //     _dadd, _dmul, _dsub, _ddiv, _drem
 303 void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
 304   LIRItem left(x->x(),  this);
 305   LIRItem right(x->y(), this);
 306   LIRItem* left_arg  = &left;
 307   LIRItem* right_arg = &right;
 308   assert(!left.is_stack(), "can't both be memory operands");
 309   left.load_item();
 310 
 311   if (right.is_register() || right.is_constant()) {
 312     right.load_item();
 313   } else {
 314     right.dont_load_item();
 315   }
 316 
 317   if ((x->op() == Bytecodes::_frem) || (x->op() == Bytecodes::_drem)) {
 318     address entry;
 319     switch (x->op()) {
 320     case Bytecodes::_frem:
 321       entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
 322       break;
 323     case Bytecodes::_drem:
 324       entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
 325       break;
 326     default:
 327       ShouldNotReachHere();
 328     }
 329     LIR_Opr result = call_runtime(x->x(), x->y(), entry, x->type(), NULL);
 330     set_result(x, result);
 331   } else {
 332     LIR_Opr reg = rlock(x);
 333     LIR_Opr tmp = LIR_OprFact::illegalOpr;
 334     arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), tmp);
 335     set_result(x, reg);
 336   }
 337 }
 338 
// for _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
    // Use shifts if divisor is a power of 2 otherwise use DSGR instruction.
    // Instruction: DSGR R1, R2
    // input : R1+1: dividend   (R1, R1+1 designate a register pair, R1 must be even)
    //         R2:   divisor
    //
    // output: R1+1: quotient
    //         R1:   remainder
    //
    // Register selection: R1:   Z_R10
    //                     R1+1: Z_R11
    //                     R2:   to be chosen by register allocator (linear scan)

    // R1, and R1+1 will be destroyed.

    LIRItem right(x->y(), this);
    LIRItem left(x->x() , this);   // Visit left second, so that the is_register test is valid.

    // Call state_for before load_item_force because state_for may
    // force the evaluation of other instructions that are needed for
    // correct debug info. Otherwise the live range of the fix
    // register might be too long.
    CodeEmitInfo* info = state_for (x);

    LIR_Opr result = rlock_result(x);
    LIR_Opr result_reg = result;
    LIR_Opr tmp = LIR_OprFact::illegalOpr;
    LIR_Opr divisor_opr = right.result();
    if (divisor_opr->is_constant() && is_power_of_2(divisor_opr->as_jlong())) {
      // Power-of-2 divisor: shifts can be used, so no fixed register pair
      // is needed and the divisor may stay a constant.
      left.load_item();
      right.dont_load_item();
    } else {
      left.load_item_force(ldivInOpr());
      right.load_item();

      // DSGR instruction needs register pair.
      if (x->op() == Bytecodes::_ldiv) {
        result_reg = ldivOutOpr();
        tmp        = lremOutOpr();
      } else {
        result_reg = lremOutOpr();
        tmp        = ldivOutOpr();
      }
    }

    if (!ImplicitDiv0Checks) {
      // Emit an explicit divide-by-zero check when implicit checks are off.
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(info));
      // Idiv/irem cannot trap (passing info would generate an assertion).
      info = NULL;
    }

    if (x->op() == Bytecodes::_lrem) {
      __ irem(left.result(), right.result(), result_reg, tmp, info);
    } else if (x->op() == Bytecodes::_ldiv) {
      __ idiv(left.result(), right.result(), result_reg, tmp, info);
    } else {
      ShouldNotReachHere();
    }

    // Copy the fixed output register into the allocated result if they differ.
    if (result_reg != result) {
      __ move(result_reg, result);
    }
  } else {
    // add/sub/mul: left in a register, right may stay a 32-bit immediate.
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    left.load_item();
    right.load_nonconstant(32);
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}
 414 
// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {
    // Use shifts if divisor is a power of 2 otherwise use DSGFR instruction.
    // Instruction: DSGFR R1, R2
    // input : R1+1: dividend   (R1, R1+1 designate a register pair, R1 must be even)
    //         R2:   divisor
    //
    // output: R1+1: quotient
    //         R1:   remainder
    //
    // Register selection: R1:   Z_R10
    //                     R1+1: Z_R11
    //                     R2:   To be chosen by register allocator (linear scan).

    // R1, and R1+1 will be destroyed.

    LIRItem right(x->y(), this);
    LIRItem left(x->x() , this);   // Visit left second, so that the is_register test is valid.

    // Call state_for before load_item_force because state_for may
    // force the evaluation of other instructions that are needed for
    // correct debug info. Otherwise the live range of the fix
    // register might be too long.
    CodeEmitInfo* info = state_for (x);

    LIR_Opr result = rlock_result(x);
    LIR_Opr result_reg = result;
    LIR_Opr tmp = LIR_OprFact::illegalOpr;
    LIR_Opr divisor_opr = right.result();
    if (divisor_opr->is_constant() && is_power_of_2(divisor_opr->as_jint())) {
      // Power-of-2 divisor: shifts can be used, so no fixed register pair
      // is needed and the divisor may stay a constant.
      left.load_item();
      right.dont_load_item();
    } else {
      left.load_item_force(divInOpr());
      right.load_item();

      // DSGFR instruction needs register pair.
      if (x->op() == Bytecodes::_idiv) {
        result_reg = divOutOpr();
        tmp        = remOutOpr();
      } else {
        result_reg = remOutOpr();
        tmp        = divOutOpr();
      }
    }

    if (!ImplicitDiv0Checks) {
      // Emit an explicit divide-by-zero check when implicit checks are off.
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(info));
      // Idiv/irem cannot trap (passing info would generate an assertion).
      info = NULL;
    }

    if (x->op() == Bytecodes::_irem) {
      __ irem(left.result(), right.result(), result_reg, tmp, info);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left.result(), right.result(), result_reg, tmp, info);
    } else {
      ShouldNotReachHere();
    }

    // Copy the fixed output register into the allocated result if they differ.
    if (result_reg != result) {
      __ move(result_reg, result);
    }
  } else {
    LIRItem left(x->x(),  this);
    LIRItem right(x->y(), this);
    LIRItem* left_arg = &left;
    LIRItem* right_arg = &right;
    if (x->is_commutative() && left.is_stack() && right.is_register()) {
      // swap them if left is real stack (or cached) and right is real register(not cached)
      left_arg = &right;
      right_arg = &left;
    }

    left_arg->load_item();

    // Do not need to load right, as we can handle stack and constants.
    if (x->op() == Bytecodes::_imul) {
      bool use_tmp = false;
      if (right_arg->is_constant()) {
        // A temp is needed for the shift/add strength reduction of
        // multiplications by 2^n +/- 1 (see strength_reduce_multiply).
        int iconst = right_arg->get_jint_constant();
        if (is_power_of_2(iconst - 1) || is_power_of_2(iconst + 1)) {
          use_tmp = true;
        }
      }
      right_arg->dont_load_item();
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      if (use_tmp) {
        tmp = new_register(T_INT);
      }
      rlock_result(x);

      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    } else {
      right_arg->dont_load_item();
      rlock_result(x);
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    }
  }
}
 518 
 519 void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
 520   // If an operand with use count 1 is the left operand, then it is
 521   // likely that no move for 2-operand-LIR-form is necessary.
 522   if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
 523     x->swap_operands();
 524   }
 525 
 526   ValueTag tag = x->type()->tag();
 527   assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
 528   switch (tag) {
 529     case floatTag:
 530     case doubleTag: do_ArithmeticOp_FPU(x);  return;
 531     case longTag:   do_ArithmeticOp_Long(x); return;
 532     case intTag:    do_ArithmeticOp_Int(x);  return;
 533     default:
 534       ShouldNotReachHere();
 535   }
 536 }
 537 
// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
// NOTE: an earlier comment here said "count must always be in rcx" — that is
// an x86 requirement and does not apply on this platform: a non-constant
// shift count is simply loaded into any register.
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);

  ValueTag elemType = x->type()->tag();
  // A constant count can be encoded directly; otherwise load it.
  bool must_load_count = !count.is_constant();
  if (must_load_count) {
    count.load_item();
  } else {
    count.dont_load_item();
  }
  value.load_item();
  LIR_Opr reg = rlock_result(x);

  shift_op(x->op(), reg, value.result(), count.result(), LIR_OprFact::illegalOpr);
}
 556 
// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  // IF an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary.
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();
  // Right operand may stay a constant if it fits into 32 bits.
  right.load_nonconstant(32);
  LIR_Opr reg = rlock_result(x);

  logic_op(x->op(), reg, left.result(), right.result());
}
 574 
// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
// Three-way compare producing -1/0/1 in an int register. For FP compares the
// 'l' variants treat unordered (NaN) as less-than.
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);
  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    ShouldNotReachHere();
  }
}
 591 
// Compare-and-swap at 'addr': returns an int register holding 1 on success,
// 0 on failure. Temps t1/t2 are only needed for the compressed-oops case.
LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr t1 = LIR_OprFact::illegalOpr;
  LIR_Opr t2 = LIR_OprFact::illegalOpr;
  cmp_value.load_item();
  new_value.load_item();
  if (type == T_OBJECT) {
    if (UseCompressedOops) {
      t1 = new_register(T_OBJECT);
      t2 = new_register(T_OBJECT);
    }
    __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else if (type == T_INT) {
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else if (type == T_LONG) {
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else {
    ShouldNotReachHere();
  }
  // Generate conditional move of boolean result.
  LIR_Opr result = new_register(T_INT);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, type);
  return result;
}
 616 
// Atomic exchange is not implemented for this platform.
LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  Unimplemented(); // Currently not supported on this platform.
  return LIR_OprFact::illegalOpr;
}
 621 
// Atomic fetch-and-add at 'addr'; returns the previous value in a new register.
LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  value.load_item();
  __ xadd(addr, value.result(), result, LIR_OprFact::illegalOpr);
  return result;
}
 628 
// Lower math intrinsics: abs/sqrt inline as LIR instructions; the
// transcendental functions (sin/cos/tan/log/log10/exp/pow) are lowered to
// SharedRuntime calls. Unhandled intrinsic ids fall through without effect.
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
    case vmIntrinsics::_dabs:
    case vmIntrinsics::_dsqrt:
    case vmIntrinsics::_dsqrt_strict: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);

      // Inline lowering: a single LIR instruction, no runtime call.
      switch (x->id()) {
        case vmIntrinsics::_dsqrt:
        case vmIntrinsics::_dsqrt_strict: {
          __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
          break;
        }
        case vmIntrinsics::_dabs: {
          __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
          break;
        }
        default:
          ShouldNotReachHere();
      }
      break;
    }
    case vmIntrinsics::_dsin:   // fall through
    case vmIntrinsics::_dcos:   // fall through
    case vmIntrinsics::_dtan:   // fall through
    case vmIntrinsics::_dlog:   // fall through
    case vmIntrinsics::_dlog10: // fall through
    case vmIntrinsics::_dexp: {
      assert(x->number_of_arguments() == 1, "wrong type");

      // One-argument transcendental: pick the matching runtime entry point.
      address runtime_entry = NULL;
      switch (x->id()) {
        case vmIntrinsics::_dsin:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
          break;
        case vmIntrinsics::_dcos:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
          break;
        case vmIntrinsics::_dtan:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
          break;
        case vmIntrinsics::_dlog:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
          break;
        case vmIntrinsics::_dlog10:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
          break;
        case vmIntrinsics::_dexp:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
          break;
        default:
          ShouldNotReachHere();
      }

      LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    case vmIntrinsics::_dpow: {
      // Two-argument variant: pow(x, y).
      assert(x->number_of_arguments() == 2, "wrong type");
      address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    default:
      break;
  }
}
 701 
// Lower the System.arraycopy intrinsic. All five arguments must live in the
// fixed C ABI argument registers because the copy is always a call.
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Copy stubs possibly call C code, e.g. G1 barriers, so we need to reserve room
  // for the C ABI (see frame::z_abi_160).
  BasicTypeArray sig; // Empty signature is precise enough.
  frame_map()->c_calling_convention(&sig);

  // Make all state_for calls early since they can emit code.
  CodeEmitInfo* info = state_for (x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // Operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call).

  src.load_item_force     (FrameMap::as_oop_opr(Z_ARG1));
  src_pos.load_item_force (FrameMap::as_opr(Z_ARG2));
  dst.load_item_force     (FrameMap::as_oop_opr(Z_ARG3));
  dst_pos.load_item_force (FrameMap::as_opr(Z_ARG4));
  length.load_item_force  (FrameMap::as_opr(Z_ARG5));

  LIR_Opr tmp =            FrameMap::as_opr(Z_R7);

  set_no_result(x);

  // Static analysis of the copy (flags and, if known, the array type).
  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(),
               length.result(), tmp, expected_type, flags, info); // does add_safepoint
}
 740 
// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
// All primitive conversions lower to a single LIR convert instruction.
void LIRGenerator::do_Convert(Convert* x) {
  LIRItem value(x->value(), this);

  value.load_item();
  LIR_Opr reg = rlock_result(x);
  __ convert(x->op(), value.result(), reg);
}
 750 
// new: allocate an instance of x->klass(). The allocation is emitted with
// fixed temps; the result arrives in the fixed call-result register.
void LIRGenerator::do_NewInstance(NewInstance* x) {
  print_if_not_loaded(x);

  // This instruction can be deoptimized in the slow path : use
  // Z_R2 as result register.
  const LIR_Opr reg = result_register_for (x->type());

  CodeEmitInfo* info = state_for (x, x->state());
  LIR_Opr tmp1 = FrameMap::Z_R12_oop_opr;
  LIR_Opr tmp2 = FrameMap::Z_R13_oop_opr;
  LIR_Opr tmp3 = reg;
  LIR_Opr tmp4 = LIR_OprFact::illegalOpr;
  LIR_Opr klass_reg = FrameMap::Z_R11_metadata_opr;
  new_instance(reg, x->klass(), x->is_unresolved(), tmp1, tmp2, tmp3, tmp4, klass_reg, info);
  // Copy from the fixed register into a freshly allocated result register.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
 768 
// newarray: allocate a primitive array of x->elt_type() with the given length.
// The type-array klass is a compile-time constant, so no patching is needed.
void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for (x, x->state());

  LIRItem length(x->length(), this);
  length.load_item();

  LIR_Opr reg = result_register_for (x->type());
  LIR_Opr tmp1 = FrameMap::Z_R12_oop_opr;
  LIR_Opr tmp2 = FrameMap::Z_R13_oop_opr;
  LIR_Opr tmp3 = reg;
  LIR_Opr tmp4 = LIR_OprFact::illegalOpr;
  LIR_Opr klass_reg = FrameMap::Z_R11_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  // Load the klass constant for the element type directly.
  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  // Copy from the fixed register into a freshly allocated result register.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
 792 
// anewarray: allocate an object array. Unlike primitive arrays, the klass may
// be unloaded at compile time, in which case patching info is required.
void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* info = state_for (x, x->state());
  // In case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for (x, x->state_before());
  }

  LIRItem length(x->length(), this);
  length.load_item();

  const LIR_Opr reg = result_register_for (x->type());
  LIR_Opr tmp1 = FrameMap::Z_R12_oop_opr;
  LIR_Opr tmp2 = FrameMap::Z_R13_oop_opr;
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  LIR_Opr tmp4 = LIR_OprFact::illegalOpr;
  LIR_Opr klass_reg = FrameMap::Z_R11_metadata_opr;
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciKlass* obj = ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    // Cannot represent the array klass — abandon the compilation.
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  // Copy from the fixed register into a freshly allocated result register.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
 825 
// Allocate a multi-dimensional array (multianewarray bytecode) by calling the
// Runtime1::new_multi_array_id runtime entry. The dimension sizes are passed
// as a varargs block of jints on the stack, with Z_R5 pointing at it.
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  // Wrap each dimension value in a LIRItem (evaluated later, below).
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for (x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for (x, x->state());

  // Store each dimension size into its jint slot in the on-stack varargs area.
  i = dims->length();
  while (--i >= 0) {
    LIRItem* size = items->at(i);
    size->load_nonconstant(32);
    // FrameMap::_reserved_argument_area_size includes the dimensions varargs, because
    // it's initialized to hir()->max_stack() when the FrameMap is created.
    store_stack_parameter(size->result(), in_ByteSize(i*sizeof(jint) + FrameMap::first_available_sp_in_frame));
  }

  // Runtime call arguments: klass (Z_R3), rank (Z_R4), varargs pointer (Z_R5).
  LIR_Opr klass_reg = FrameMap::Z_R3_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::Z_R4_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::Z_R5_opr;
  // Address of the first dimension slot on the stack.
  __ leal(LIR_OprFact::address(new LIR_Address(FrameMap::Z_SP_opr, FrameMap::first_available_sp_in_frame, T_INT)),
          varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for (x->type());
  __ call_runtime(Runtime1::entry_for (Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
 877 
// Block entry hook: no per-block LIR needs to be emitted on this platform.
void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // Nothing to do.
}
 881 
// Generate code for a checkcast. Depending on the flavor of the check, the
// failure path is either an exception stub (class cast / incompatible class
// change) or a deoptimization (invokespecial receiver check).
void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // Must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization).
    patching_info = state_for (x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  // Pick the slow-path stub matching the kind of check being performed.
  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    // Regular checkcast: the failing object is passed to the exception stub.
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  __ checkcast(reg, obj.result(), x->klass(),
               tmp1, tmp2, tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}
 919 
 920 
// Generate code for an instanceof test; the int result (0/1) goes to out_reg.
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for (x, x->state_before());
  }
  // Ensure the result register is not the input register because the
  // result is initialized before the patching safepoint.
  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
                x->direct_compare(), patching_info,
                x->profiled_method(), x->profiled_bci());
}
 938 
 939 
 940 void LIRGenerator::do_If (If* x) {
 941   assert(x->number_of_sux() == 2, "inconsistency");
 942   ValueTag tag = x->x()->type()->tag();
 943   bool is_safepoint = x->is_safepoint();
 944 
 945   If::Condition cond = x->cond();
 946 
 947   LIRItem xitem(x->x(), this);
 948   LIRItem yitem(x->y(), this);
 949   LIRItem* xin = &xitem;
 950   LIRItem* yin = &yitem;
 951 
 952   if (tag == longTag) {
 953     // For longs, only conditions "eql", "neq", "lss", "geq" are valid;
 954     // mirror for other conditions.
 955     if (cond == If::gtr || cond == If::leq) {
 956       cond = Instruction::mirror(cond);
 957       xin = &yitem;
 958       yin = &xitem;
 959     }
 960     xin->set_destroys_register();
 961   }
 962   xin->load_item();
 963   // TODO: don't load long constants != 0L
 964   if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
 965     // inline long zero
 966     yin->dont_load_item();
 967   } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
 968     // Longs cannot handle constants at right side.
 969     yin->load_item();
 970   } else {
 971     yin->dont_load_item();
 972   }
 973 
 974   LIR_Opr left = xin->result();
 975   LIR_Opr right = yin->result();
 976 
 977   set_no_result(x);
 978 
 979   // Add safepoint before generating condition code so it can be recomputed.
 980   if (x->is_safepoint()) {
 981     // Increment backedge counter if needed.
 982     increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
 983         x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
 984     // Use safepoint_poll_register() instead of LIR_OprFact::illegalOpr.
 985     __ safepoint(safepoint_poll_register(), state_for (x, x->state_before()));
 986   }
 987 
 988   __ cmp(lir_cond(cond), left, right);
 989   // Generate branch profiling. Profiling code doesn't kill flags.
 990   profile_branch(x, cond);
 991   move_to_phi(x->state());
 992   if (x->x()->type()->is_float_kind()) {
 993     __ branch(lir_cond(cond), x->tsux(), x->usux());
 994   } else {
 995     __ branch(lir_cond(cond), x->tsux());
 996   }
 997   assert(x->default_sux() == x->fsux(), "wrong destination above");
 998   __ jump(x->default_sux());
 999 }
1000 
1001 LIR_Opr LIRGenerator::getThreadPointer() {
1002   return FrameMap::as_pointer_opr(Z_thread);
1003 }
1004 
1005 void LIRGenerator::trace_block_entry(BlockBegin* block) {
1006   __ move(LIR_OprFact::intConst(block->block_id()), FrameMap::Z_R2_opr);
1007   LIR_OprList* args = new LIR_OprList(1);
1008   args->append(FrameMap::Z_R2_opr);
1009   address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
1010   __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
1011 }
1012 
// Emit a store to a volatile field.
// NOTE(review): no explicit memory barrier is emitted here; ordering is
// presumably covered by s390's memory model and/or barriers emitted by
// shared code — confirm before relying on it.
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  __ store(value, address, info);
}
1017 
// Emit a load from a volatile field.
// NOTE(review): a plain load is emitted; any required ordering is presumably
// handled by s390's memory model and/or shared code — confirm.
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  __ load(address, result, info);
}
1022 
// Generate code for the java.util.zip.CRC32 intrinsics:
//  - _updateCRC32: single-byte update, done inline via update_crc32.
//  - _updateBytesCRC32/_updateByteBufferCRC32: range update via the
//    updateBytesCRC32 stub, called with (crc, address, len).
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "or should not be here");
  LIR_Opr result = rlock_result(x);

  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // Registers destroyed by update_crc32.
      crc.set_destroys_register();
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      // updateBytes operates on a byte[] (skip the array header);
      // updateByteBuffer gets a raw buffer address (no header offset).
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      // Fold a constant offset into the address displacement; otherwise keep
      // it as an index register.
      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();

      // A non-constant index must be sign-extended to 64 bit for addressing.
      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
      }

      // Address of the first byte to process.
      LIR_Address* a = new LIR_Address(base_op, index, offset, T_BYTE);

      // C calling convention for the stub: int crc, address buf, int len.
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for (x->type());

      LIR_Opr arg1 = cc->at(0);
      LIR_Opr arg2 = cc->at(1);
      LIR_Opr arg3 = cc->at(2);

      crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32 stub doesn't care about high bits.
      __ leal(LIR_OprFact::address(a), arg2);
      len.load_item_force(arg3); // We skip int->long conversion here, because CRC32 stub expects int.

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), LIR_OprFact::illegalOpr, result_reg, cc->args());
      __ move(result_reg, result);
      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}
1090 
1091 void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
1092   assert(UseCRC32CIntrinsics, "or should not be here");
1093   LIR_Opr result = rlock_result(x);
1094 
1095   switch (x->id()) {
1096     case vmIntrinsics::_updateBytesCRC32C:
1097     case vmIntrinsics::_updateDirectByteBufferCRC32C: {
1098       bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);
1099 
1100       LIRItem crc(x->argument_at(0), this);
1101       LIRItem buf(x->argument_at(1), this);
1102       LIRItem off(x->argument_at(2), this);
1103       LIRItem end(x->argument_at(3), this);
1104       buf.load_item();
1105       off.load_nonconstant();
1106       end.load_nonconstant();
1107 
1108       // len = end - off
1109       LIR_Opr len  = end.result();
1110       LIR_Opr tmpA = new_register(T_INT);
1111       LIR_Opr tmpB = new_register(T_INT);
1112       __ move(end.result(), tmpA);
1113       __ move(off.result(), tmpB);
1114       __ sub(tmpA, tmpB, tmpA);
1115       len = tmpA;
1116 
1117       LIR_Opr index = off.result();
1118       int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
1119       if (off.result()->is_constant()) {
1120         index = LIR_OprFact::illegalOpr;
1121         offset += off.result()->as_jint();
1122       }
1123       LIR_Opr base_op = buf.result();
1124 
1125       if (index->is_valid()) {
1126         LIR_Opr tmp = new_register(T_LONG);
1127         __ convert(Bytecodes::_i2l, index, tmp);
1128         index = tmp;
1129       }
1130 
1131       LIR_Address* a = new LIR_Address(base_op, index, offset, T_BYTE);
1132 
1133       BasicTypeList signature(3);
1134       signature.append(T_INT);
1135       signature.append(T_ADDRESS);
1136       signature.append(T_INT);
1137       CallingConvention* cc = frame_map()->c_calling_convention(&signature);
1138       const LIR_Opr result_reg = result_register_for (x->type());
1139 
1140       LIR_Opr arg1 = cc->at(0);
1141       LIR_Opr arg2 = cc->at(1);
1142       LIR_Opr arg3 = cc->at(2);
1143 
1144       crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32C stub doesn't care about high bits.
1145       __ leal(LIR_OprFact::address(a), arg2);
1146       __ move(len, cc->at(2));   // We skip int->long conversion here, because CRC32C stub expects int.
1147 
1148       __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), LIR_OprFact::illegalOpr, result_reg, cc->args());
1149       __ move(result_reg, result);
1150       break;
1151     }
1152     default: {
1153       ShouldNotReachHere();
1154     }
1155   }
1156 }
1157 
1158 void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
1159   assert(x->number_of_arguments() == 3, "wrong type");
1160   assert(UseFMA, "Needs FMA instructions support.");
1161   LIRItem value(x->argument_at(0), this);
1162   LIRItem value1(x->argument_at(1), this);
1163   LIRItem value2(x->argument_at(2), this);
1164 
1165   value2.set_destroys_register();
1166 
1167   value.load_item();
1168   value1.load_item();
1169   value2.load_item();
1170 
1171   LIR_Opr calc_input = value.result();
1172   LIR_Opr calc_input1 = value1.result();
1173   LIR_Opr calc_input2 = value2.result();
1174   LIR_Opr calc_result = rlock_result(x);
1175 
1176   switch (x->id()) {
1177   case vmIntrinsics::_fmaD:   __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
1178   case vmIntrinsics::_fmaF:   __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
1179   default:                    ShouldNotReachHere();
1180   }
1181 }
1182 
// Intrinsic for vectorizedMismatch: not implemented for s390 C1, so the
// compiler must never select it on this platform; reaching here is a bug.
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}