1 /*
   2  * Copyright (c) 2013, Red Hat Inc.
   3  * Copyright (c) 2005, 2019, Oracle and/or its affiliates.
   4  * All rights reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_FrameMap.hpp"
  30 #include "c1/c1_Instruction.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_LIRGenerator.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArray.hpp"
  36 #include "ci/ciObjArrayKlass.hpp"
  37 #include "ci/ciTypeArrayKlass.hpp"
  38 #include "runtime/sharedRuntime.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "vmreg_aarch64.inline.hpp"
  41 
  42 #if INCLUDE_ALL_GCS
  43 #include "gc_implementation/shenandoah/c1/shenandoahBarrierSetC1.hpp"
  44 #endif
  45 
  46 #ifdef ASSERT
  47 #define __ gen()->lir(__FILE__, __LINE__)->
  48 #else
  49 #define __ gen()->lir()->
  50 #endif
  51 
// Item will be loaded into a byte register; Intel only
void LIRItem::load_byte_item() {
  // No byte-register distinction on this port: an ordinary load suffices.
  load_item();
}
  56 
  57 
  58 void LIRItem::load_nonconstant() {
  59   LIR_Opr r = value()->operand();
  60   if (r->is_constant()) {
  61     _result = r;
  62   } else {
  63     load_item();
  64   }
  65 }
  66 
  67 //--------------------------------------------------------------
  68 //               LIRGenerator
  69 //--------------------------------------------------------------
  70 
  71 
// Fixed registers used by exception dispatch: r0 carries the exception oop,
// r3 the throwing pc.
LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::r0_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::r3_opr; }
// x86-style fixed division/shift registers have no equivalent here.
LIR_Opr LIRGenerator::divInOpr()        { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::divOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::remOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::r0_opr; }
// No temp reserved for reading the current thread.
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }
  80 
  81 
  82 LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  83   LIR_Opr opr;
  84   switch (type->tag()) {
  85     case intTag:     opr = FrameMap::r0_opr;          break;
  86     case objectTag:  opr = FrameMap::r0_oop_opr;      break;
  87     case longTag:    opr = FrameMap::long0_opr;        break;
  88     case floatTag:   opr = FrameMap::fpu0_float_opr;  break;
  89     case doubleTag:  opr = FrameMap::fpu0_double_opr;  break;
  90 
  91     case addressTag:
  92     default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  93   }
  94 
  95   assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  96   return opr;
  97 }
  98 
  99 
// Allocate a virtual register suitable for a byte value.  `type` is unused
// here; a plain int register is handed out and flagged as byte-capable.
LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}
 105 
 106 
 107 //--------- loading items into registers --------------------------------
 108 
 109 
 110 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
 111   if (v->type()->as_IntConstant() != NULL) {
 112     return v->type()->as_IntConstant()->value() == 0L;
 113   } else if (v->type()->as_LongConstant() != NULL) {
 114     return v->type()->as_LongConstant()->value() == 0L;
 115   } else if (v->type()->as_ObjectConstant() != NULL) {
 116     return v->type()->as_ObjectConstant()->value()->is_null_object();
 117   } else {
 118     return false;
 119   }
 120 }
 121 
// An int constant may be inlined when it encodes as an add/sub immediate;
// long and object constants only when they are zero / null.
bool LIRGenerator::can_inline_as_constant(Value v) const {
  // FIXME: Just a guess
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}
 134 
 135 
 136 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { return false; }
 137 
 138 
// No register is reserved for safepoint polling on this port.
LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}
 142 
 143 
 144 LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
 145                                             int shift, int disp, BasicType type) {
 146   assert(base->is_register(), "must be");
 147 
 148   // accumulate fixed displacements
 149   if (index->is_constant()) {
 150     disp += index->as_constant_ptr()->as_jint() << shift;
 151     index = LIR_OprFact::illegalOpr;
 152   }
 153 
 154   if (index->is_register()) {
 155     // apply the shift and accumulate the displacement
 156     if (shift > 0) {
 157       LIR_Opr tmp = new_pointer_register();
 158       __ shift_left(index, shift, tmp);
 159       index = tmp;
 160     }
 161     if (disp != 0) {
 162       LIR_Opr tmp = new_pointer_register();
 163       if (Assembler::operand_valid_for_add_sub_immediate(disp)) {
 164         __ add(tmp, tmp, LIR_OprFact::intptrConst(disp));
 165         index = tmp;
 166       } else {
 167         __ move(tmp, LIR_OprFact::intptrConst(disp));
 168         __ add(tmp, index, tmp);
 169         index = tmp;
 170       }
 171       disp = 0;
 172     }
 173   } else if (disp != 0 && !Address::offset_ok_for_immed(disp, shift)) {
 174     // index is illegal so replace it with the displacement loaded into a register
 175     index = new_pointer_register();
 176     __ move(LIR_OprFact::intptrConst(disp), index);
 177     disp = 0;
 178   }
 179 
 180   // at this point we either have base + index or base + displacement
 181   if (disp == 0) {
 182     return new LIR_Address(base, index, type);
 183   } else {
 184     assert(Address::offset_ok_for_immed(disp, 0), "must be");
 185     return new LIR_Address(base, disp, type);
 186   }
 187 }
 188 
 189 
// Build the address of array element `index_opr` of element type `type`.
// Constant indices become a pure displacement; register indices use a
// scaled-index address with the array header offset folded into the base.
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type, bool needs_card_mark) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    // Constant index: fold header offset and scaled index into one displacement.
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + index_opr->as_jint() * elem_size, type);
  } else {
// #ifdef _LP64
//     if (index_opr->type() == T_INT) {
//       LIR_Opr tmp = new_register(T_LONG);
//       __ convert(Bytecodes::_i2l, index_opr, tmp);
//       index_opr = tmp;
//     }
// #endif
    if (offset_in_bytes) {
      // Fold the array header offset into the base register so the final
      // address is simply base + scaled index.
      LIR_Opr tmp = new_pointer_register();
      __ add(array_opr, LIR_OprFact::intConst(offset_in_bytes), tmp);
      array_opr = tmp;
      offset_in_bytes = 0;
    }
    addr =  new LIR_Address(array_opr,
                            index_opr,
                            LIR_Address::scale(type),
                            offset_in_bytes, type);
  }
  if (needs_card_mark) {
    // This store will need a precise card mark, so go ahead and
    // compute the full address instead of computing once for the
    // store and again for the card mark.
    LIR_Opr tmp = new_pointer_register();
    __ leal(LIR_OprFact::address(addr), tmp);
    return new LIR_Address(tmp, type);
  } else {
    return addr;
  }
}
 230 
// Materialize the constant `x` of the given type: return it as an inline
// LIR constant when it encodes as a logical immediate, otherwise load it
// into a fresh register and return that.
LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
      // This is all rather nasty.  We don't know whether our constant
      // is required for a logical or an arithmetic operation, so we
      // don't know what the range of valid values is!!
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else {
    ShouldNotReachHere();
  }
  return r;
}
 255 
 256 
 257 
// Bump the counter at absolute address `counter` by `step`.  Materializes
// the address in a pointer register, then delegates to the address-based
// overload for the load/add/store sequence.
void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}
 264 
 265 
 266 void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
 267   LIR_Opr imm = NULL;
 268   switch(addr->type()) {
 269   case T_INT:
 270     imm = LIR_OprFact::intConst(step);
 271     break;
 272   case T_LONG:
 273     imm = LIR_OprFact::longConst(step);
 274     break;
 275   default:
 276     ShouldNotReachHere();
 277   }
 278   LIR_Opr reg = new_register(addr->type());
 279   __ load(addr, reg);
 280   __ add(reg, imm, reg);
 281   __ store(reg, addr);
 282 }
 283 
// Compare the int at [base + disp] against constant `c`; the value is first
// loaded into a fresh register since there is no memory-operand compare.
void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr reg = new_register(T_INT);
  __ load(generate_address(base, disp, T_INT), reg, info);
  __ cmp(condition, reg, LIR_OprFact::intConst(c));
}
 289 
// Compare register `reg` against the value of `type` at [base + disp],
// loading the memory operand into a temp first.
// NOTE(review): the temp is allocated as T_INT even though the load uses
// `type` -- confirm callers only pass int-sized types here.
void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr reg1 = new_register(T_INT);
  __ load(generate_address(base, disp, type), reg1, info);
  __ cmp(condition, reg, reg1);
}
 295 
 296 
 297 bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
 298 
 299   if (is_power_of_2(c - 1)) {
 300     __ shift_left(left, exact_log2(c - 1), tmp);
 301     __ add(tmp, left, result);
 302     return true;
 303   } else if (is_power_of_2(c + 1)) {
 304     __ shift_left(left, exact_log2(c + 1), tmp);
 305     __ sub(tmp, left, result);
 306     return true;
 307   } else {
 308     return false;
 309   }
 310 }
 311 
// Store an outgoing call argument at the given offset from SP.
void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
}
 316 
 317 //----------------------------------------------------------------------
 318 //             visitor functions
 319 //----------------------------------------------------------------------
 320 
 321 
// Generate LIR for an array element store: range check, (for oop stores)
// dynamic array-store check and GC write barriers, then the store itself.
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(),"");
  bool needs_range_check = x->compute_needs_range_check();
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  // A store check is needed for any oop store that is not provably storing
  // null, or whenever profiling wants to see the store.
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object() ||
                                         x->should_profile());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();

  }
  if (needs_store_check || x->check_boolean()) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // the CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  // emit array address setup early so it schedules better
  // FIXME?  No harm in this on aarch64, and it might help
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      // Length is already in a register: compare directly, branch to the
      // range-check stub when index >= length (unsigned compare).
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    // Dynamic type check that `value` is assignable to the array's element
    // class; may throw ArrayStoreException via the store-check slow path.
    LIR_Opr tmp1 = new_register(objectType);
    LIR_Opr tmp2 = new_register(objectType);
    LIR_Opr tmp3 = new_register(objectType);

    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
  }

  if (obj_store) {
    // Needs GC write barriers.
    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
    __ move(value.result(), array_addr, null_check_info);
    // Seems to be a precise
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  } else {
    // Primitive store; boolean stores may need masking to 0/1 first.
    LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
    __ move(result, array_addr, null_check_info);
  }
}
 397 
// Generate LIR for monitorenter: load the object, allocate the lock slot
// register (plus a scratch for biased locking), and emit the lock sequence.
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for biased locking
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_register(T_INT);
  }

  // Debug info for the implicit null check, only when the bytecode needs one.
  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                        x->monitor_no(), info_for_exception, info);
}
 423 
 424 
// Generate LIR for monitorexit.  The object is deliberately not loaded
// (dont_load_item); a fresh temp is handed to monitor_exit instead --
// presumably it rematerializes the object from the lock slot (confirm in
// monitor_exit).
void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}
 436 
 437 
 438 void LIRGenerator::do_NegateOp(NegateOp* x) {
 439 
 440   LIRItem from(x->x(), this);
 441   from.load_item();
 442   LIR_Opr result = rlock_result(x);
 443   __ negate (from.result(), result);
 444 
 445 }
 446 
 447 // for  _fadd, _fmul, _fsub, _fdiv, _frem
 448 //      _dadd, _dmul, _dsub, _ddiv, _drem
// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {

  if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
    // float remainder is implemented as a direct call into the runtime
    // NOTE(review): the LIRItem names are swapped relative to Java operand
    // order -- "right" wraps x->x() and "left" wraps x->y().  Net effect:
    // x->x() ends up in cc->at(0) and x->y() in cc->at(1); confirm this
    // matches the frem/drem runtime entry's argument order.
    LIRItem right(x->x(), this);
    LIRItem left(x->y(), this);

    BasicTypeList signature(2);
    if (x->op() == Bytecodes::_frem) {
      signature.append(T_FLOAT);
      signature.append(T_FLOAT);
    } else {
      signature.append(T_DOUBLE);
      signature.append(T_DOUBLE);
    }
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(1));
    right.load_item();

    __ move(right.result(), cc->at(0));

    address entry;
    if (x->op() == Bytecodes::_frem) {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
    } else {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
    }

    // Call out to the runtime and copy the fixed result register into the
    // virtual result.
    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    return;
  }

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg  = &left;
  LIRItem* right_arg = &right;

  // Always load right hand side.
  right.load_item();

  if (!left.is_register())
    left.load_item();

  LIR_Opr reg = rlock(x);
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  if (x->is_strictfp() && (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv)) {
    tmp = new_register(T_DOUBLE);
  }

  // NOTE(review): `tmp` allocated above is never passed to
  // arithmetic_op_fpu -- confirm whether it is actually needed here.
  arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), x->is_strictfp());

  set_result(x, round_item(reg));
}
 507 
 508 // for  _ladd, _lmul, _lsub, _ldiv, _lrem
// for  _ladd, _lmul, _lsub, _ldiv, _lrem
// Long division/remainder get an explicit zero-divisor check branching to a
// DivByZeroStub; add/sub may keep a small constant RHS as an immediate.
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {

  // missing test if instr is commutative and if we should swap
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {

    // the check for division by zero destroys the right operand
    right.set_destroys_register();

    // check for division by zero (destroys registers of right operand!)
    CodeEmitInfo* info = state_for(x);

    left.load_item();
    right.load_item();

    __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));

    rlock_result(x);
    switch (x->op()) {
    case Bytecodes::_lrem:
      __ rem (left.result(), right.result(), x->operand());
      break;
    case Bytecodes::_ldiv:
      __ div (left.result(), right.result(), x->operand());
      break;
    default:
      ShouldNotReachHere();
      break;
    }


  } else {
    assert (x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub,
            "expect lmul, ladd or lsub");
    // add, sub, mul
    left.load_item();
    if (! right.is_register()) {
      if (x->op() == Bytecodes::_lmul
          || ! right.is_constant()
          || ! Assembler::operand_valid_for_add_sub_immediate(right.get_jlong_constant())) {
        right.load_item();
      } else { // add, sub
        assert (x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expect ladd or lsub");
        // don't load constants to save register
        right.load_nonconstant();
      }
    }
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}
 563 
 564 // for: _iadd, _imul, _isub, _idiv, _irem
// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {

  // Test if instr is commutative and if we should swap
  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;
  if (x->is_commutative() && left.is_stack() && right.is_register()) {
    // swap them if left is real stack (or cached) and right is real register(not cached)
    left_arg = &right;
    right_arg = &left;
  }

  left_arg->load_item();

  // do not need to load right, as we can handle stack and constants
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {

    right_arg->load_item();
    rlock_result(x);

    // Explicit zero-divisor check branching to the DivByZeroStub.
    // NOTE(review): the divisor is an int but is compared against
    // longConst(0) -- confirm this is the intended comparison width.
    CodeEmitInfo* info = state_for(x);
    LIR_Opr tmp = new_register(T_INT);
    __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
    // NOTE(review): this reassignment of `info` appears unused below
    // (irem/idiv are passed NULL) -- confirm it is intentional.
    info = state_for(x);

    if (x->op() == Bytecodes::_irem) {
      __ irem(left_arg->result(), right_arg->result(), x->operand(), tmp, NULL);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left_arg->result(), right_arg->result(), x->operand(), tmp, NULL);
    }

  } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) {
    // Small constant RHS can stay an immediate; anything else is loaded.
    if (right.is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(right.get_jint_constant())) {
      right.load_nonconstant();
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), LIR_OprFact::illegalOpr);
  } else {
    assert (x->op() == Bytecodes::_imul, "expect imul");
    if (right.is_constant()) {
      int c = right.get_jint_constant();
      // Keep the constant unloaded only when it can be strength-reduced
      // (power of two, or one away from a power of two).
      if (! is_power_of_2(c) && ! is_power_of_2(c + 1) && ! is_power_of_2(c - 1)) {
        // Cannot use constant op.
        right.load_item();
      } else {
        right.dont_load_item();
      }
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), new_register(T_INT));
  }
}
 624 
 625 void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
 626   // when an operand with use count 1 is the left operand, then it is
 627   // likely that no move for 2-operand-LIR-form is necessary
 628   if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
 629     x->swap_operands();
 630   }
 631 
 632   ValueTag tag = x->type()->tag();
 633   assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
 634   switch (tag) {
 635     case floatTag:
 636     case doubleTag:  do_ArithmeticOp_FPU(x);  return;
 637     case longTag:    do_ArithmeticOp_Long(x); return;
 638     case intTag:     do_ArithmeticOp_Int(x);  return;
 639   }
 640   ShouldNotReachHere();
 641 }
 642 
 643 // _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
 644 void LIRGenerator::do_ShiftOp(ShiftOp* x) {
 645 
 646   LIRItem left(x->x(),  this);
 647   LIRItem right(x->y(), this);
 648 
 649   left.load_item();
 650 
 651   rlock_result(x);
 652   if (right.is_constant()) {
 653     right.dont_load_item();
 654     
 655     switch (x->op()) {
 656     case Bytecodes::_ishl: {
 657       int c = right.get_jint_constant() & 0x1f;
 658       __ shift_left(left.result(), c, x->operand());
 659       break;
 660     }
 661     case Bytecodes::_ishr: {
 662       int c = right.get_jint_constant() & 0x1f;
 663       __ shift_right(left.result(), c, x->operand());
 664       break;
 665     }
 666     case Bytecodes::_iushr: {
 667       int c = right.get_jint_constant() & 0x1f;
 668       __ unsigned_shift_right(left.result(), c, x->operand());
 669       break;
 670     }
 671     case Bytecodes::_lshl: {
 672       int c = right.get_jint_constant() & 0x3f;
 673       __ shift_left(left.result(), c, x->operand());
 674       break;
 675     }
 676     case Bytecodes::_lshr: {
 677       int c = right.get_jint_constant() & 0x3f;
 678       __ shift_right(left.result(), c, x->operand());
 679       break;
 680     }
 681     case Bytecodes::_lushr: {
 682       int c = right.get_jint_constant() & 0x3f;
 683       __ unsigned_shift_right(left.result(), c, x->operand());
 684       break;
 685     }
 686     default:
 687       ShouldNotReachHere();
 688     }
 689   } else {
 690     right.load_item();
 691     LIR_Opr tmp = new_register(T_INT);
 692     switch (x->op()) {
 693     case Bytecodes::_ishl: {
 694       __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
 695       __ shift_left(left.result(), tmp, x->operand(), tmp);
 696       break;
 697     }
 698     case Bytecodes::_ishr: {
 699       __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
 700       __ shift_right(left.result(), tmp, x->operand(), tmp);
 701       break;
 702     }
 703     case Bytecodes::_iushr: {
 704       __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
 705       __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
 706       break;
 707     }
 708     case Bytecodes::_lshl: {
 709       __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
 710       __ shift_left(left.result(), tmp, x->operand(), tmp);
 711       break;
 712     }
 713     case Bytecodes::_lshr: {
 714       __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
 715       __ shift_right(left.result(), tmp, x->operand(), tmp);
 716       break;
 717     }
 718     case Bytecodes::_lushr: {
 719       __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
 720       __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
 721       break;
 722     }
 723     default:
 724       ShouldNotReachHere();
 725     }
 726   }
 727 }
 728 
 729 // _iand, _land, _ior, _lor, _ixor, _lxor
// _iand, _land, _ior, _lor, _ixor, _lxor
// The RHS may stay an inline constant when it encodes as an AArch64 logical
// immediate; otherwise it is loaded into a register.
void LIRGenerator::do_LogicOp(LogicOp* x) {

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()
      && ((right.type()->tag() == intTag
           && Assembler::operand_valid_for_logical_immediate(true, right.get_jint_constant()))
          || (right.type()->tag() == longTag
              && Assembler::operand_valid_for_logical_immediate(false, right.get_jlong_constant()))))  {
    right.dont_load_item();
  } else {
    right.load_item();
  }
  switch (x->op()) {
  case Bytecodes::_iand:
  case Bytecodes::_land:
    __ logical_and(left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ior:
  case Bytecodes::_lor:
    __ logical_or (left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ixor:
  case Bytecodes::_lxor:
    __ logical_xor(left.result(), right.result(), x->operand()); break;
  default: Unimplemented();
  }
}
 760 
 761 // _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
// Produces the three-way -1/0/1 comparison result in a fresh register.
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    // lcmp2int is allowed to clobber its left input.
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    // The last argument selects NaN handling: fcmpl/dcmpl treat an
    // unordered result as "less".
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}
 782 
// Generate LIR for the Unsafe compareAndSwap intrinsics (object/int/long).
// Computes the field address, performs the CAS, and for object fields wraps
// it in the GC pre/post write barriers.
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp

  assert(obj.type()->tag() == objectTag, "invalid type");

  // In 64bit the type can be long, sparc doesn't have this assert
  // assert(offset.type()->tag() == intTag, "invalid type");

  assert(cmp.type()->tag() == type->tag(), "invalid type");
  assert(val.type()->tag() == type->tag(), "invalid type");

  // get address of field
  obj.load_item();
  offset.load_nonconstant();
  val.load_item();
  cmp.load_item();

  LIR_Address* a;
  if(offset.result()->is_constant()) {
    jlong c = offset.result()->as_jlong();
    if ((jlong)((jint)c) == c) {
      // Offset fits in 32 bits: use it as an immediate displacement.
      a = new LIR_Address(obj.result(),
                          (jint)c,
                          as_BasicType(type));
    } else {
      // Offset too large for a displacement: materialize it in a register.
      LIR_Opr tmp = new_register(T_LONG);
      __ move(offset.result(), tmp);
      a = new LIR_Address(obj.result(),
                          tmp,
                          as_BasicType(type));
    }
  } else {
    a = new LIR_Address(obj.result(),
                        offset.result(),
                        LIR_Address::times_1,
                        0,
                        as_BasicType(type));
  }
  // Flatten the address into a single pointer register for the CAS.
  LIR_Opr addr = new_pointer_register();
  __ leal(LIR_OprFact::address(a), addr);

  if (type == objectType) {  // Write-barrier needed for Object fields.
    // Do the pre-write barrier, if any.
    pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
  }

  LIR_Opr result = rlock_result(x);

  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience

  if (type == objectType) {
    // Object CAS needs two int temps (e.g. for compressed-oop encoding).
    __ cas_obj(addr, cmp.result(), val.result(), new_register(T_INT), new_register(T_INT),
               result);
  } else if (type == intType)
    __ cas_int(addr, cmp.result(), val.result(), ill, ill, result);
  else if (type == longType)
    __ cas_long(addr, cmp.result(), val.result(), ill, ill, result);
  else {
    ShouldNotReachHere();
  }

  if (type == objectType) {   // Write-barrier needed for Object fields.
    // Seems to be precise
    post_barrier(addr, val.result());
  }
}
 854 
 855 void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
 856   switch (x->id()) {
 857     case vmIntrinsics::_dabs:
 858     case vmIntrinsics::_dsqrt: {
 859       assert(x->number_of_arguments() == 1, "wrong type");
 860       LIRItem value(x->argument_at(0), this);
 861       value.load_item();
 862       LIR_Opr dst = rlock_result(x);
 863 
 864       switch (x->id()) {
 865       case vmIntrinsics::_dsqrt: {
 866         __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
 867         break;
 868       }
 869       case vmIntrinsics::_dabs: {
 870         __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
 871         break;
 872       }
 873       }
 874       break;
 875     }
 876     case vmIntrinsics::_dlog10: // fall through
 877     case vmIntrinsics::_dlog: // fall through
 878     case vmIntrinsics::_dsin: // fall through
 879     case vmIntrinsics::_dtan: // fall through
 880     case vmIntrinsics::_dcos: // fall through
 881     case vmIntrinsics::_dexp: {
 882       assert(x->number_of_arguments() == 1, "wrong type");
 883 
 884       address runtime_entry = NULL;
 885       switch (x->id()) {
 886       case vmIntrinsics::_dsin:
 887         runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
 888         break;
 889       case vmIntrinsics::_dcos:
 890         runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
 891         break;
 892       case vmIntrinsics::_dtan:
 893         runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
 894         break;
 895       case vmIntrinsics::_dlog:
 896         runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
 897         break;
 898       case vmIntrinsics::_dlog10:
 899         runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
 900         break;
 901       case vmIntrinsics::_dexp:
 902         runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
 903         break;
 904       default:
 905         ShouldNotReachHere();
 906       }
 907 
 908       LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
 909       set_result(x, result);
 910       break;
 911     }
 912     case vmIntrinsics::_dpow: {
 913       assert(x->number_of_arguments() == 2, "wrong type");
 914       address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
 915       LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL);
 916       set_result(x, result);
 917       break;
 918     }
 919   }
 920 }
 921 
 922 
// Lowers the System.arraycopy intrinsic: all five operands are forced into
// the java calling convention registers and a single arraycopy LIR op is
// emitted.
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  // Extra scratch register for the emitted copy sequence.
  LIR_Opr tmp =           FrameMap::as_opr(j_rarg5);

  set_no_result(x);

  // Let the shared helper classify the copy (statically-known array type,
  // which checks are required, etc.).
  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}
 963 
 964 void LIRGenerator::do_update_CRC32(Intrinsic* x) {
 965   assert(UseCRC32Intrinsics, "why are we here?");
 966   // Make all state_for calls early since they can emit code
 967   LIR_Opr result = rlock_result(x);
 968   int flags = 0;
 969   switch (x->id()) {
 970     case vmIntrinsics::_updateCRC32: {
 971       LIRItem crc(x->argument_at(0), this);
 972       LIRItem val(x->argument_at(1), this);
 973       // val is destroyed by update_crc32
 974       val.set_destroys_register();
 975       crc.load_item();
 976       val.load_item();
 977       __ update_crc32(crc.result(), val.result(), result);
 978       break;
 979     }
 980     case vmIntrinsics::_updateBytesCRC32:
 981     case vmIntrinsics::_updateByteBufferCRC32: {
 982       bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);
 983 
 984       LIRItem crc(x->argument_at(0), this);
 985       LIRItem buf(x->argument_at(1), this);
 986       LIRItem off(x->argument_at(2), this);
 987       LIRItem len(x->argument_at(3), this);
 988       buf.load_item();
 989       off.load_nonconstant();
 990 
 991       LIR_Opr index = off.result();
 992       int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
 993       if(off.result()->is_constant()) {
 994         index = LIR_OprFact::illegalOpr;
 995        offset += off.result()->as_jint();
 996       }
 997       LIR_Opr base_op = buf.result();
 998 
 999       if (index->is_valid()) {
1000         LIR_Opr tmp = new_register(T_LONG);
1001         __ convert(Bytecodes::_i2l, index, tmp);
1002         index = tmp;
1003       }
1004 
1005       if (offset) {
1006         LIR_Opr tmp = new_pointer_register();
1007         __ add(base_op, LIR_OprFact::intConst(offset), tmp);
1008         base_op = tmp;
1009         offset = 0;
1010       }
1011 
1012       LIR_Address* a = new LIR_Address(base_op,
1013                                        index,
1014                                        LIR_Address::times_1,
1015                                        offset,
1016                                        T_BYTE);
1017       BasicTypeList signature(3);
1018       signature.append(T_INT);
1019       signature.append(T_ADDRESS);
1020       signature.append(T_INT);
1021       CallingConvention* cc = frame_map()->c_calling_convention(&signature);
1022       const LIR_Opr result_reg = result_register_for(x->type());
1023 
1024       LIR_Opr addr = new_pointer_register();
1025       __ leal(LIR_OprFact::address(a), addr);
1026 
1027       crc.load_item_force(cc->at(0));
1028       __ move(addr, cc->at(1));
1029       len.load_item_force(cc->at(2));
1030 
1031       __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
1032       __ move(result_reg, result);
1033 
1034       break;
1035     }
1036     default: {
1037       ShouldNotReachHere();
1038     }
1039   }
1040 }
1041 
1042 // _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
1043 // _i2b, _i2c, _i2s
1044 void LIRGenerator::do_Convert(Convert* x) {
1045   bool needs_stub;
1046 
1047   switch (x->op()) {
1048     case Bytecodes::_i2l:
1049     case Bytecodes::_l2i:
1050     case Bytecodes::_i2b:
1051     case Bytecodes::_i2c:
1052     case Bytecodes::_i2s:
1053     case Bytecodes::_f2d:
1054     case Bytecodes::_d2f:
1055     case Bytecodes::_i2f:
1056     case Bytecodes::_i2d:
1057     case Bytecodes::_l2f:
1058     case Bytecodes::_l2d: needs_stub = false;
1059       break;
1060     case Bytecodes::_f2l:
1061     case Bytecodes::_d2l:
1062     case Bytecodes::_f2i:
1063     case Bytecodes::_d2i: needs_stub = true;
1064       break;
1065     default: ShouldNotReachHere();
1066   }
1067 
1068   LIRItem value(x->value(), this);
1069   value.load_item();
1070   LIR_Opr input = value.result();
1071   LIR_Opr result = rlock(x);
1072 
1073   // arguments of lir_convert
1074   LIR_Opr conv_input = input;
1075   LIR_Opr conv_result = result;
1076   ConversionStub* stub = NULL;
1077 
1078   if (needs_stub) {
1079     stub = new ConversionStub(x->op(), conv_input, conv_result);
1080   }
1081 
1082   __ convert(x->op(), conv_input, conv_result, stub, new_register(T_INT));
1083 
1084   assert(result->is_virtual(), "result must be virtual register");
1085   set_result(x, result);
1086 }
1087 
// Allocate a new object instance. The scratch and klass registers are
// fixed (r2/r5/r4/r3) and handed to the shared new_instance helper.
void LIRGenerator::do_NewInstance(NewInstance* x) {
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
                       FrameMap::r2_oop_opr,
                       FrameMap::r5_oop_opr,
                       FrameMap::r4_oop_opr,
                       LIR_OprFact::illegalOpr,
                       FrameMap::r3_metadata_opr, info);
  // Transfer the allocation out of the fixed result register into the
  // virtual register bound to this instruction.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
1105 
1106 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1107   CodeEmitInfo* info = state_for(x, x->state());
1108 
1109   LIRItem length(x->length(), this);
1110   length.load_item_force(FrameMap::r19_opr);
1111 
1112   LIR_Opr reg = result_register_for(x->type());
1113   LIR_Opr tmp1 = FrameMap::r2_oop_opr;
1114   LIR_Opr tmp2 = FrameMap::r4_oop_opr;
1115   LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1116   LIR_Opr tmp4 = reg;
1117   LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1118   LIR_Opr len = length.result();
1119   BasicType elem_type = x->elt_type();
1120 
1121   __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1122 
1123   CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
1124   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
1125 
1126   LIR_Opr result = rlock_result(x);
1127   __ move(reg, result);
1128 }
1129 
1130 void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
1131   LIRItem length(x->length(), this);
1132   // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1133   // and therefore provide the state before the parameters have been consumed
1134   CodeEmitInfo* patching_info = NULL;
1135   if (!x->klass()->is_loaded() || PatchALot) {
1136     patching_info =  state_for(x, x->state_before());
1137   }
1138 
1139   CodeEmitInfo* info = state_for(x, x->state());
1140 
1141   LIR_Opr reg = result_register_for(x->type());
1142   LIR_Opr tmp1 = FrameMap::r2_oop_opr;
1143   LIR_Opr tmp2 = FrameMap::r4_oop_opr;
1144   LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1145   LIR_Opr tmp4 = reg;
1146   LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1147 
1148   length.load_item_force(FrameMap::r19_opr);
1149   LIR_Opr len = length.result();
1150 
1151   CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
1152   ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
1153   if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1154     BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1155   }
1156   klass2reg_with_patching(klass_reg, obj, patching_info);
1157   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
1158 
1159   LIR_Opr result = rlock_result(x);
1160   __ move(reg, result);
1161 }
1162 
1163 
1164 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1165   Values* dims = x->dims();
1166   int i = dims->length();
1167   LIRItemList* items = new LIRItemList(dims->length(), NULL);
1168   while (i-- > 0) {
1169     LIRItem* size = new LIRItem(dims->at(i), this);
1170     items->at_put(i, size);
1171   }
1172 
1173   // Evaluate state_for early since it may emit code.
1174   CodeEmitInfo* patching_info = NULL;
1175   if (!x->klass()->is_loaded() || PatchALot) {
1176     patching_info = state_for(x, x->state_before());
1177 
1178     // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
1179     // clone all handlers (NOTE: Usually this is handled transparently
1180     // by the CodeEmitInfo cloning logic in CodeStub constructors but
1181     // is done explicitly here because a stub isn't being used).
1182     x->set_exception_handlers(new XHandlers(x->exception_handlers()));
1183   }
1184   CodeEmitInfo* info = state_for(x, x->state());
1185 
1186   i = dims->length();
1187   while (i-- > 0) {
1188     LIRItem* size = items->at(i);
1189     size->load_item();
1190 
1191     store_stack_parameter(size->result(), in_ByteSize(i*4));
1192   }
1193 
1194   LIR_Opr klass_reg = FrameMap::r0_metadata_opr;
1195   klass2reg_with_patching(klass_reg, x->klass(), patching_info);
1196 
1197   LIR_Opr rank = FrameMap::r19_opr;
1198   __ move(LIR_OprFact::intConst(x->rank()), rank);
1199   LIR_Opr varargs = FrameMap::r2_opr;
1200   __ move(FrameMap::sp_opr, varargs);
1201   LIR_OprList* args = new LIR_OprList(3);
1202   args->append(klass_reg);
1203   args->append(rank);
1204   args->append(varargs);
1205   LIR_Opr reg = result_register_for(x->type());
1206   __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
1207                   LIR_OprFact::illegalOpr,
1208                   reg, args, info);
1209 
1210   LIR_Opr result = rlock_result(x);
1211   __ move(reg, result);
1212 }
1213 
// Block entry needs no extra code on this platform.
void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}
1217 
// Lower a checkcast. Selects the appropriate slow-path stub for the kind
// of check (incompatible-class-change, invokespecial receiver check, or a
// plain class cast) and emits the checkcast LIR op.
void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
    (x->needs_exception_state() ? state_for(x) :
     state_for(x, x->state_before(), true /*ignore_xhandler*/));

  // Choose the stub that implements the failing path for this check kind.
  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  // An extra temp register is used when the klass must still be
  // loaded/patched or when compressed class pointers are in use.
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}
1254 
// Lower an instanceof test into a single instanceof LIR op, patching the
// klass if it is not yet loaded.
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  // An extra temp register is used when the klass must still be
  // loaded/patched or when compressed class pointers are in use.
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}
1274 
1275 void LIRGenerator::do_If(If* x) {
1276   assert(x->number_of_sux() == 2, "inconsistency");
1277   ValueTag tag = x->x()->type()->tag();
1278   bool is_safepoint = x->is_safepoint();
1279 
1280   If::Condition cond = x->cond();
1281 
1282   LIRItem xitem(x->x(), this);
1283   LIRItem yitem(x->y(), this);
1284   LIRItem* xin = &xitem;
1285   LIRItem* yin = &yitem;
1286 
1287   if (tag == longTag) {
1288     // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
1289     // mirror for other conditions
1290     if (cond == If::gtr || cond == If::leq) {
1291       cond = Instruction::mirror(cond);
1292       xin = &yitem;
1293       yin = &xitem;
1294     }
1295     xin->set_destroys_register();
1296   }
1297   xin->load_item();
1298 
1299   if (tag == longTag) {
1300     if (yin->is_constant()
1301         && Assembler::operand_valid_for_add_sub_immediate(yin->get_jlong_constant())) {
1302       yin->dont_load_item();
1303     } else {
1304       yin->load_item();
1305     }
1306   } else if (tag == intTag) {
1307     if (yin->is_constant()
1308         && Assembler::operand_valid_for_add_sub_immediate(yin->get_jint_constant()))  {
1309       yin->dont_load_item();
1310     } else {
1311       yin->load_item();
1312     }
1313   } else {
1314     yin->load_item();
1315   }
1316 
1317   // add safepoint before generating condition code so it can be recomputed
1318   if (x->is_safepoint()) {
1319     // increment backedge counter if needed
1320     increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
1321     __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
1322   }
1323   set_no_result(x);
1324 
1325   LIR_Opr left = xin->result();
1326   LIR_Opr right = yin->result();
1327 
1328   __ cmp(lir_cond(cond), left, right);
1329   // Generate branch profiling. Profiling code doesn't kill flags.
1330   profile_branch(x, cond);
1331   move_to_phi(x->state());
1332   if (x->x()->type()->is_float_kind()) {
1333     __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
1334   } else {
1335     __ branch(lir_cond(cond), right->type(), x->tsux());
1336   }
1337   assert(x->default_sux() == x->fsux(), "wrong destination above");
1338   __ jump(x->default_sux());
1339 }
1340 
// Return the current thread register (rthread) as a pointer-sized operand.
LIR_Opr LIRGenerator::getThreadPointer() {
   return FrameMap::as_pointer_opr(rthread);
}
1344 
// Block-entry tracing is not implemented on this platform.
void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1346 
// Store to a volatile field: delegated to a dedicated LIR op so the back
// end can emit the required memory ordering.
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  __ volatile_store_mem_reg(value, address, info);
}
1351 
// Load from a volatile field, prefixed by a membar when needed for
// sequential consistency (see the 8179954 note below).
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {

  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR.  Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LD;DMB but stores use STLR.  This can happen if C2 compiles
  // the stores in one method and C1 compiles the loads in another.
  if (! UseBarriersForVolatile) {
    __ membar();
  }

  __ volatile_load_mem_reg(address, result, info);
}
1367 
// Unsafe.getObject-style access: load from src + offset into dst.
// NOTE(review): is_volatile is not acted upon here — presumably ordering
// is handled by the caller; confirm against the shared LIRGenerator code.
void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
                                     BasicType type, bool is_volatile) {
  LIR_Address* addr = new LIR_Address(src, offset, type);
  __ load(addr, dst);
}
1373 
1374 
1375 void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
1376                                      BasicType type, bool is_volatile) {
1377   LIR_Address* addr = new LIR_Address(src, offset, type);
1378   bool is_obj = (type == T_ARRAY || type == T_OBJECT);
1379   if (is_obj) {
1380     // Do the pre-write barrier, if any.
1381     pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
1382                 true /* do_load */, false /* patch */, NULL);
1383     __ move(data, addr);
1384     assert(src->is_register(), "must be register");
1385     // Seems to be a precise address
1386     post_barrier(LIR_OprFact::address(addr), data);
1387   } else {
1388     __ move(data, addr);
1389   }
1390 }
1391 
// Lower Unsafe getAndSet/getAndAdd: emits an xchg or xadd LIR op on
// object + offset, with GC barriers around the exchange when the swapped
// value is an oop.
void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem value(x->value(), this);

  src.load_item();
  off.load_nonconstant();

  // We can cope with a constant increment in an xadd
  if (! (x->is_add()
         && value.is_constant()
         && can_inline_as_constant(x->value()))) {
    value.load_item();
  }

  LIR_Opr dst = rlock_result(x, type);
  LIR_Opr data = value.result();
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  LIR_Opr offset = off.result();

  // The data operand must survive the operation; copy it aside if the
  // allocator gave it the same register as the result.
  if (data == dst) {
    LIR_Opr tmp = new_register(data->type());
    __ move(data, tmp);
    data = tmp;
  }

  LIR_Opr src_op = src.result();

  // Fold a constant offset into the address displacement when it fits
  // in a jint; otherwise keep it as a register index.
  LIR_Address* addr;
  if (offset->is_constant()) {
    jlong l = offset->as_jlong();
    assert((jlong)((jint)l) == l, "offset too large for constant");
    jint c = (jint)l;
    addr = new LIR_Address(src.result(), c, type);
  } else {
    addr = new LIR_Address(src.result(), offset, type);
  }

  LIR_Opr tmp = new_register(T_INT);
  LIR_Opr ptr = LIR_OprFact::illegalOpr;

  if (x->is_add()) {
    __ xadd(LIR_OprFact::address(addr), data, dst, tmp);
  } else {
    if (is_obj) {
      // Do the pre-write barrier, if any.
      ptr = new_pointer_register();
      __ add(src_op, off.result(), ptr);
      pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
                  true /* do_load */, false /* patch */, NULL);
    }
    __ xchg(LIR_OprFact::address(addr), data, dst, tmp);
#if INCLUDE_ALL_GCS
    // Shenandoah applies a load-reference barrier to the previous value
    // returned by the exchange.
    if (UseShenandoahGC && is_obj) {
      LIR_Opr tmp = ShenandoahBarrierSet::barrier_set()->bsc1()->load_reference_barrier(this, dst);
      __ move(tmp, dst);
    }
#endif
    if (is_obj) {
      post_barrier(ptr, data);
    }
  }
}