1 /*
   2  * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Compilation.hpp"
  27 #include "c1/c1_Defs.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "ci/ciObjArray.hpp"
  36 #include "ci/ciUtilities.hpp"
  37 #include "compiler/compilerDefinitions.inline.hpp"
  38 #include "compiler/compilerOracle.hpp"
  39 #include "gc/shared/barrierSet.hpp"
  40 #include "gc/shared/c1/barrierSetC1.hpp"
  41 #include "oops/klass.inline.hpp"
  42 #include "oops/methodCounters.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "runtime/stubRoutines.hpp"
  45 #include "runtime/vm_version.hpp"
  46 #include "utilities/bitMap.inline.hpp"
  47 #include "utilities/macros.hpp"
  48 #include "utilities/powerOfTwo.hpp"
  49 
  50 #ifdef ASSERT
  51 #define __ gen()->lir(__FILE__, __LINE__)->
  52 #else
  53 #define __ gen()->lir()->
  54 #endif
  55 
  56 #ifndef PATCHED_ADDR
  57 #define PATCHED_ADDR  (max_jint)
  58 #endif
  59 
  60 void PhiResolverState::reset() {
  61   _virtual_operands.clear();
  62   _other_operands.clear();
  63   _vreg_table.clear();
  64 }
  65 
  66 
  67 //--------------------------------------------------------------
  68 // PhiResolver
  69 
  70 // Resolves cycles:
  71 //
  72 //  r1 := r2  becomes  temp := r1
  73 //  r2 := r1           r1 := r2
  74 //                     r2 := temp
  75 // and orders moves:
  76 //
  77 //  r2 := r3  becomes  r1 := r2
  78 //  r1 := r2           r2 := r3
  79 
  80 PhiResolver::PhiResolver(LIRGenerator* gen)
  81  : _gen(gen)
  82  , _state(gen->resolver_state())
  83  , _loop(nullptr)
  84  , _temp(LIR_OprFact::illegalOpr)
  85 {
  86   // reinitialize the shared state arrays
  87   _state.reset();
  88 }
  89 
  90 
  91 void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  92   assert(src->is_valid(), "");
  93   assert(dest->is_valid(), "");
  94   __ move(src, dest);
  95 }
  96 
  97 
  98 void PhiResolver::move_temp_to(LIR_Opr dest) {
  99   assert(_temp->is_valid(), "");
 100   emit_move(_temp, dest);
 101   NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
 102 }
 103 
 104 
 105 void PhiResolver::move_to_temp(LIR_Opr src) {
 106   assert(_temp->is_illegal(), "");
 107   _temp = _gen->new_register(src->type());
 108   emit_move(src, _temp);
 109 }
 110 
 111 
 112 // Traverse assignment graph in depth first order and generate moves in post order
 113 // ie. two assignments: b := c, a := b start with node c:
 114 // Call graph: move(null, c) -> move(c, b) -> move(b, a)
 115 // Generates moves in this order: move b to a and move c to b
 116 // ie. cycle a := b, b := a start with node a
 117 // Call graph: move(null, a) -> move(a, b) -> move(b, a)
 118 // Generates moves in this order: move b to temp, move a to b, move temp to a
 119 void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
 120   if (!dest->visited()) {
 121     dest->set_visited();
 122     for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
 123       move(dest, dest->destination_at(i));
 124     }
 125   } else if (!dest->start_node()) {
 126     // cylce in graph detected
 127     assert(_loop == nullptr, "only one loop valid!");
 128     _loop = dest;
 129     move_to_temp(src->operand());
 130     return;
 131   } // else dest is a start node
 132 
 133   if (!dest->assigned()) {
 134     if (_loop == dest) {
 135       move_temp_to(dest->operand());
 136       dest->set_assigned();
 137     } else if (src != nullptr) {
 138       emit_move(src->operand(), dest->operand());
 139       dest->set_assigned();
 140     }
 141   }
 142 }
 143 
 144 
 145 PhiResolver::~PhiResolver() {
 146   int i;
 147   // resolve any cycles in moves from and to virtual registers
 148   for (i = virtual_operands().length() - 1; i >= 0; i --) {
 149     ResolveNode* node = virtual_operands().at(i);
 150     if (!node->visited()) {
 151       _loop = nullptr;
 152       move(nullptr, node);
 153       node->set_start_node();
 154       assert(_temp->is_illegal(), "move_temp_to() call missing");
 155     }
 156   }
 157 
 158   // generate move for move from non virtual register to abitrary destination
 159   for (i = other_operands().length() - 1; i >= 0; i --) {
 160     ResolveNode* node = other_operands().at(i);
 161     for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
 162       emit_move(node->operand(), node->destination_at(j)->operand());
 163     }
 164   }
 165 }
 166 
 167 
 168 ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
 169   ResolveNode* node;
 170   if (opr->is_virtual()) {
 171     int vreg_num = opr->vreg_number();
 172     node = vreg_table().at_grow(vreg_num, nullptr);
 173     assert(node == nullptr || node->operand() == opr, "");
 174     if (node == nullptr) {
 175       node = new ResolveNode(opr);
 176       vreg_table().at_put(vreg_num, node);
 177     }
 178     // Make sure that all virtual operands show up in the list when
 179     // they are used as the source of a move.
 180     if (source && !virtual_operands().contains(node)) {
 181       virtual_operands().append(node);
 182     }
 183   } else {
 184     assert(source, "");
 185     node = new ResolveNode(opr);
 186     other_operands().append(node);
 187   }
 188   return node;
 189 }
 190 
 191 
 192 void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
 193   assert(dest->is_virtual(), "");
 194   // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
 195   assert(src->is_valid(), "");
 196   assert(dest->is_valid(), "");
 197   ResolveNode* source = source_node(src);
 198   source->append(destination_node(dest));
 199 }
 200 
 201 
 202 //--------------------------------------------------------------
 203 // LIRItem
 204 
 205 void LIRItem::set_result(LIR_Opr opr) {
 206   assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
 207   value()->set_operand(opr);
 208 
 209 #ifdef ASSERT
 210   if (opr->is_virtual()) {
 211     _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), nullptr);
 212   }
 213 #endif
 214 
 215   _result = opr;
 216 }
 217 
 218 void LIRItem::load_item() {
 219   if (result()->is_illegal()) {
 220     // update the items result
 221     _result = value()->operand();
 222   }
 223   if (!result()->is_register()) {
 224     LIR_Opr reg = _gen->new_register(value()->type());
 225     __ move(result(), reg);
 226     if (result()->is_constant()) {
 227       _result = reg;
 228     } else {
 229       set_result(reg);
 230     }
 231   }
 232 }
 233 
 234 
 235 void LIRItem::load_for_store(BasicType type) {
 236   if (_gen->can_store_as_constant(value(), type)) {
 237     _result = value()->operand();
 238     if (!_result->is_constant()) {
 239       _result = LIR_OprFact::value_type(value()->type());
 240     }
 241   } else if (type == T_BYTE || type == T_BOOLEAN) {
 242     load_byte_item();
 243   } else {
 244     load_item();
 245   }
 246 }
 247 
 248 void LIRItem::load_item_force(LIR_Opr reg) {
 249   LIR_Opr r = result();
 250   if (r != reg) {
 251 #if !defined(ARM) && !defined(E500V2)
 252     if (r->type() != reg->type()) {
 253       // moves between different types need an intervening spill slot
 254       r = _gen->force_to_spill(r, reg->type());
 255     }
 256 #endif
 257     __ move(r, reg);
 258     _result = reg;
 259   }
 260 }
 261 
 262 ciObject* LIRItem::get_jobject_constant() const {
 263   ObjectType* oc = type()->as_ObjectType();
 264   if (oc) {
 265     return oc->constant_value();
 266   }
 267   return nullptr;
 268 }
 269 
 270 
 271 jint LIRItem::get_jint_constant() const {
 272   assert(is_constant() && value() != nullptr, "");
 273   assert(type()->as_IntConstant() != nullptr, "type check");
 274   return type()->as_IntConstant()->value();
 275 }
 276 
 277 
 278 jint LIRItem::get_address_constant() const {
 279   assert(is_constant() && value() != nullptr, "");
 280   assert(type()->as_AddressConstant() != nullptr, "type check");
 281   return type()->as_AddressConstant()->value();
 282 }
 283 
 284 
 285 jfloat LIRItem::get_jfloat_constant() const {
 286   assert(is_constant() && value() != nullptr, "");
 287   assert(type()->as_FloatConstant() != nullptr, "type check");
 288   return type()->as_FloatConstant()->value();
 289 }
 290 
 291 
 292 jdouble LIRItem::get_jdouble_constant() const {
 293   assert(is_constant() && value() != nullptr, "");
 294   assert(type()->as_DoubleConstant() != nullptr, "type check");
 295   return type()->as_DoubleConstant()->value();
 296 }
 297 
 298 
 299 jlong LIRItem::get_jlong_constant() const {
 300   assert(is_constant() && value() != nullptr, "");
 301   assert(type()->as_LongConstant() != nullptr, "type check");
 302   return type()->as_LongConstant()->value();
 303 }
 304 
 305 
 306 
 307 //--------------------------------------------------------------
 308 
 309 
 310 void LIRGenerator::block_do_prolog(BlockBegin* block) {
 311 #ifndef PRODUCT
 312   if (PrintIRWithLIR) {
 313     block->print();
 314   }
 315 #endif
 316 
 317   // set up the list of LIR instructions
 318   assert(block->lir() == nullptr, "LIR list already computed for this block");
 319   _lir = new LIR_List(compilation(), block);
 320   block->set_lir(_lir);
 321 
 322   __ branch_destination(block->label());
 323 
 324   if (LIRTraceExecution &&
 325       Compilation::current()->hir()->start()->block_id() != block->block_id() &&
 326       !block->is_set(BlockBegin::exception_entry_flag)) {
 327     assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
 328     trace_block_entry(block);
 329   }
 330 }
 331 
 332 
 333 void LIRGenerator::block_do_epilog(BlockBegin* block) {
 334 #ifndef PRODUCT
 335   if (PrintIRWithLIR) {
 336     tty->cr();
 337   }
 338 #endif
 339 
 340   // LIR_Opr for unpinned constants shouldn't be referenced by other
 341   // blocks so clear them out after processing the block.
 342   for (int i = 0; i < _unpinned_constants.length(); i++) {
 343     _unpinned_constants.at(i)->clear_operand();
 344   }
 345   _unpinned_constants.trunc_to(0);
 346 
 347   // clear our any registers for other local constants
 348   _constants.trunc_to(0);
 349   _reg_for_constants.trunc_to(0);
 350 }
 351 
 352 
 353 void LIRGenerator::block_do(BlockBegin* block) {
 354   CHECK_BAILOUT();
 355 
 356   block_do_prolog(block);
 357   set_block(block);
 358 
 359   for (Instruction* instr = block; instr != nullptr; instr = instr->next()) {
 360     if (instr->is_pinned()) do_root(instr);
 361   }
 362 
 363   set_block(nullptr);
 364   block_do_epilog(block);
 365 }
 366 
 367 
 368 //-------------------------LIRGenerator-----------------------------
 369 
 370 // This is where the tree-walk starts; instr must be root;
 371 void LIRGenerator::do_root(Value instr) {
 372   CHECK_BAILOUT();
 373 
 374   InstructionMark im(compilation(), instr);
 375 
 376   assert(instr->is_pinned(), "use only with roots");
 377   assert(instr->subst() == instr, "shouldn't have missed substitution");
 378 
 379   instr->visit(this);
 380 
 381   assert(!instr->has_uses() || instr->operand()->is_valid() ||
 382          instr->as_Constant() != nullptr || bailed_out(), "invalid item set");
 383 }
 384 
 385 
 386 // This is called for each node in tree; the walk stops if a root is reached
 387 void LIRGenerator::walk(Value instr) {
 388   InstructionMark im(compilation(), instr);
 389   //stop walk when encounter a root
 390   if ((instr->is_pinned() && instr->as_Phi() == nullptr) || instr->operand()->is_valid()) {
 391     assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != nullptr, "this root has not yet been visited");
 392   } else {
 393     assert(instr->subst() == instr, "shouldn't have missed substitution");
 394     instr->visit(this);
 395     // assert(instr->use_count() > 0 || instr->as_Phi() != nullptr, "leaf instruction must have a use");
 396   }
 397 }
 398 
 399 
 400 CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
 401   assert(state != nullptr, "state must be defined");
 402 
 403 #ifndef PRODUCT
 404   state->verify();
 405 #endif
 406 
 407   ValueStack* s = state;
 408   for_each_state(s) {
 409     if (s->kind() == ValueStack::EmptyExceptionState ||
 410         s->kind() == ValueStack::CallerEmptyExceptionState)
 411     {
 412 #ifdef ASSERT
 413       int index;
 414       Value value;
 415       for_each_stack_value(s, index, value) {
 416         fatal("state must be empty");
 417       }
 418       for_each_local_value(s, index, value) {
 419         fatal("state must be empty");
 420       }
 421 #endif
 422       assert(s->locks_size() == 0 || s->locks_size() == 1, "state must be empty");
 423       continue;
 424     }
 425 
 426     int index;
 427     Value value;
 428     for_each_stack_value(s, index, value) {
 429       assert(value->subst() == value, "missed substitution");
 430       if (!value->is_pinned() && value->as_Constant() == nullptr && value->as_Local() == nullptr) {
 431         walk(value);
 432         assert(value->operand()->is_valid(), "must be evaluated now");
 433       }
 434     }
 435 
 436     int bci = s->bci();
 437     IRScope* scope = s->scope();
 438     ciMethod* method = scope->method();
 439 
 440     MethodLivenessResult liveness = method->liveness_at_bci(bci);
 441     if (bci == SynchronizationEntryBCI) {
 442       if (x->as_ExceptionObject() || x->as_Throw()) {
 443         // all locals are dead on exit from the synthetic unlocker
 444         liveness.clear();
 445       } else {
 446         assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
 447       }
 448     }
 449     if (!liveness.is_valid()) {
 450       // Degenerate or breakpointed method.
 451       bailout("Degenerate or breakpointed method");
 452     } else {
 453       assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
 454       for_each_local_value(s, index, value) {
 455         assert(value->subst() == value, "missed substitution");
 456         if (liveness.at(index) && !value->type()->is_illegal()) {
 457           if (!value->is_pinned() && value->as_Constant() == nullptr && value->as_Local() == nullptr) {
 458             walk(value);
 459             assert(value->operand()->is_valid(), "must be evaluated now");
 460           }
 461         } else {
 462           // null out this local so that linear scan can assume that all non-null values are live.
 463           s->invalidate_local(index);
 464         }
 465       }
 466     }
 467   }
 468 
 469   return new CodeEmitInfo(state, ignore_xhandler ? nullptr : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
 470 }
 471 
 472 
 473 CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
 474   return state_for(x, x->exception_state());
 475 }
 476 
 477 
 478 void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) {
 479   /* C2 relies on constant pool entries being resolved (ciTypeFlow), so if tiered compilation
 480    * is active and the class hasn't yet been resolved we need to emit a patch that resolves
 481    * the class. */
 482   if ((!CompilerConfig::is_c1_only_no_jvmci() && need_resolve) || !obj->is_loaded() || PatchALot) {
 483     assert(info != nullptr, "info must be set if class is not loaded");
 484     __ klass2reg_patch(nullptr, r, info);
 485   } else {
 486     // no patching needed
 487     __ metadata2reg(obj->constant_encoding(), r);
 488   }
 489 }
 490 
 491 
 492 void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
 493                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
 494   CodeStub* stub = new RangeCheckStub(range_check_info, index, array);
 495   if (index->is_constant()) {
 496     cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
 497                 index->as_jint(), null_check_info);
 498     __ branch(lir_cond_belowEqual, stub); // forward branch
 499   } else {
 500     cmp_reg_mem(lir_cond_aboveEqual, index, array,
 501                 arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
 502     __ branch(lir_cond_aboveEqual, stub); // forward branch
 503   }
 504 }
 505 
 506 void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp_op, CodeEmitInfo* info) {
 507   LIR_Opr result_op = result;
 508   LIR_Opr left_op   = left;
 509   LIR_Opr right_op  = right;
 510 
 511   if (two_operand_lir_form && left_op != result_op) {
 512     assert(right_op != result_op, "malformed");
 513     __ move(left_op, result_op);
 514     left_op = result_op;
 515   }
 516 
 517   switch(code) {
 518     case Bytecodes::_dadd:
 519     case Bytecodes::_fadd:
 520     case Bytecodes::_ladd:
 521     case Bytecodes::_iadd:  __ add(left_op, right_op, result_op); break;
 522     case Bytecodes::_fmul:
 523     case Bytecodes::_lmul:  __ mul(left_op, right_op, result_op); break;
 524 
 525     case Bytecodes::_dmul:  __ mul(left_op, right_op, result_op, tmp_op); break;
 526 
 527     case Bytecodes::_imul:
 528       {
 529         bool did_strength_reduce = false;
 530 
 531         if (right->is_constant()) {
 532           jint c = right->as_jint();
 533           if (c > 0 && is_power_of_2(c)) {
 534             // do not need tmp here
 535             __ shift_left(left_op, exact_log2(c), result_op);
 536             did_strength_reduce = true;
 537           } else {
 538             did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
 539           }
 540         }
 541         // we couldn't strength reduce so just emit the multiply
 542         if (!did_strength_reduce) {
 543           __ mul(left_op, right_op, result_op);
 544         }
 545       }
 546       break;
 547 
 548     case Bytecodes::_dsub:
 549     case Bytecodes::_fsub:
 550     case Bytecodes::_lsub:
 551     case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;
 552 
 553     case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
 554     // ldiv and lrem are implemented with a direct runtime call
 555 
 556     case Bytecodes::_ddiv: __ div(left_op, right_op, result_op, tmp_op); break;
 557 
 558     case Bytecodes::_drem:
 559     case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;
 560 
 561     default: ShouldNotReachHere();
 562   }
 563 }
 564 
 565 
 566 void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
 567   arithmetic_op(code, result, left, right, tmp);
 568 }
 569 
 570 
 571 void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
 572   arithmetic_op(code, result, left, right, LIR_OprFact::illegalOpr, info);
 573 }
 574 
 575 
 576 void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
 577   arithmetic_op(code, result, left, right, tmp);
 578 }
 579 
 580 
 581 void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
 582 
 583   if (two_operand_lir_form && value != result_op
 584       // Only 32bit right shifts require two operand form on S390.
 585       S390_ONLY(&& (code == Bytecodes::_ishr || code == Bytecodes::_iushr))) {
 586     assert(count != result_op, "malformed");
 587     __ move(value, result_op);
 588     value = result_op;
 589   }
 590 
 591   assert(count->is_constant() || count->is_register(), "must be");
 592   switch(code) {
 593   case Bytecodes::_ishl:
 594   case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
 595   case Bytecodes::_ishr:
 596   case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
 597   case Bytecodes::_iushr:
 598   case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
 599   default: ShouldNotReachHere();
 600   }
 601 }
 602 
 603 
 604 void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
 605   if (two_operand_lir_form && left_op != result_op) {
 606     assert(right_op != result_op, "malformed");
 607     __ move(left_op, result_op);
 608     left_op = result_op;
 609   }
 610 
 611   switch(code) {
 612     case Bytecodes::_iand:
 613     case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;
 614 
 615     case Bytecodes::_ior:
 616     case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;
 617 
 618     case Bytecodes::_ixor:
 619     case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;
 620 
 621     default: ShouldNotReachHere();
 622   }
 623 }
 624 
 625 
 626 void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
 627   if (!GenerateSynchronizationCode) return;
 628   // for slow path, use debug info for state after successful locking
 629   CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
 630   __ load_stack_address_monitor(monitor_no, lock);
 631   // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
 632   __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
 633 }
 634 
 635 
 636 void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
 637   if (!GenerateSynchronizationCode) return;
 638   // setup registers
 639   LIR_Opr hdr = lock;
 640   lock = new_hdr;
 641   CodeStub* slow_path = new MonitorExitStub(lock, LockingMode != LM_MONITOR, monitor_no);
 642   __ load_stack_address_monitor(monitor_no, lock);
 643   __ unlock_object(hdr, object, lock, scratch, slow_path);
 644 }
 645 
 646 #ifndef PRODUCT
 647 void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
 648   if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
 649     tty->print_cr("   ###class not loaded at new bci %d", new_instance->printable_bci());
 650   } else if (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_jvmci() && new_instance->is_unresolved())) {
 651     tty->print_cr("   ###class not resolved at new bci %d", new_instance->printable_bci());
 652   }
 653 }
 654 #endif
 655 
 656 void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
 657   klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
 658   // If klass is not loaded we do not know if the klass has finalizers:
 659   if (UseFastNewInstance && klass->is_loaded()
 660       && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
 661 
 662     Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;
 663 
 664     CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);
 665 
 666     assert(klass->is_loaded(), "must be loaded");
 667     // allocate space for instance
 668     assert(klass->size_helper() > 0, "illegal instance size");
 669     const int instance_size = align_object_size(klass->size_helper());
 670     __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
 671                        oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
 672   } else {
 673     CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
 674     __ branch(lir_cond_always, slow_path);
 675     __ branch_destination(slow_path->continuation());
 676   }
 677 }
 678 
 679 
 680 static bool is_constant_zero(Instruction* inst) {
 681   IntConstant* c = inst->type()->as_IntConstant();
 682   if (c) {
 683     return (c->value() == 0);
 684   }
 685   return false;
 686 }
 687 
 688 
 689 static bool positive_constant(Instruction* inst) {
 690   IntConstant* c = inst->type()->as_IntConstant();
 691   if (c) {
 692     return (c->value() >= 0);
 693   }
 694   return false;
 695 }
 696 
 697 
 698 static ciArrayKlass* as_array_klass(ciType* type) {
 699   if (type != nullptr && type->is_array_klass() && type->is_loaded()) {
 700     return (ciArrayKlass*)type;
 701   } else {
 702     return nullptr;
 703   }
 704 }
 705 
 706 static ciType* phi_declared_type(Phi* phi) {
 707   ciType* t = phi->operand_at(0)->declared_type();
 708   if (t == nullptr) {
 709     return nullptr;
 710   }
 711   for(int i = 1; i < phi->operand_count(); i++) {
 712     if (t != phi->operand_at(i)->declared_type()) {
 713       return nullptr;
 714     }
 715   }
 716   return t;
 717 }
 718 
 719 void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
 720   Instruction* src     = x->argument_at(0);
 721   Instruction* src_pos = x->argument_at(1);
 722   Instruction* dst     = x->argument_at(2);
 723   Instruction* dst_pos = x->argument_at(3);
 724   Instruction* length  = x->argument_at(4);
 725 
 726   // first try to identify the likely type of the arrays involved
 727   ciArrayKlass* expected_type = nullptr;
 728   bool is_exact = false, src_objarray = false, dst_objarray = false;
 729   {
 730     ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
 731     ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
 732     Phi* phi;
 733     if (src_declared_type == nullptr && (phi = src->as_Phi()) != nullptr) {
 734       src_declared_type = as_array_klass(phi_declared_type(phi));
 735     }
 736     ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
 737     ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
 738     if (dst_declared_type == nullptr && (phi = dst->as_Phi()) != nullptr) {
 739       dst_declared_type = as_array_klass(phi_declared_type(phi));
 740     }
 741 
 742     if (src_exact_type != nullptr && src_exact_type == dst_exact_type) {
 743       // the types exactly match so the type is fully known
 744       is_exact = true;
 745       expected_type = src_exact_type;
 746     } else if (dst_exact_type != nullptr && dst_exact_type->is_obj_array_klass()) {
 747       ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
 748       ciArrayKlass* src_type = nullptr;
 749       if (src_exact_type != nullptr && src_exact_type->is_obj_array_klass()) {
 750         src_type = (ciArrayKlass*) src_exact_type;
 751       } else if (src_declared_type != nullptr && src_declared_type->is_obj_array_klass()) {
 752         src_type = (ciArrayKlass*) src_declared_type;
 753       }
 754       if (src_type != nullptr) {
 755         if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
 756           is_exact = true;
 757           expected_type = dst_type;
 758         }
 759       }
 760     }
 761     // at least pass along a good guess
 762     if (expected_type == nullptr) expected_type = dst_exact_type;
 763     if (expected_type == nullptr) expected_type = src_declared_type;
 764     if (expected_type == nullptr) expected_type = dst_declared_type;
 765 
 766     src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
 767     dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
 768   }
 769 
 770   // if a probable array type has been identified, figure out if any
 771   // of the required checks for a fast case can be elided.
 772   int flags = LIR_OpArrayCopy::all_flags;
 773 
 774   if (!src_objarray)
 775     flags &= ~LIR_OpArrayCopy::src_objarray;
 776   if (!dst_objarray)
 777     flags &= ~LIR_OpArrayCopy::dst_objarray;
 778 
 779   if (!x->arg_needs_null_check(0))
 780     flags &= ~LIR_OpArrayCopy::src_null_check;
 781   if (!x->arg_needs_null_check(2))
 782     flags &= ~LIR_OpArrayCopy::dst_null_check;
 783 
 784 
 785   if (expected_type != nullptr) {
 786     Value length_limit = nullptr;
 787 
 788     IfOp* ifop = length->as_IfOp();
 789     if (ifop != nullptr) {
 790       // look for expressions like min(v, a.length) which ends up as
 791       //   x > y ? y : x  or  x >= y ? y : x
 792       if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
 793           ifop->x() == ifop->fval() &&
 794           ifop->y() == ifop->tval()) {
 795         length_limit = ifop->y();
 796       }
 797     }
 798 
 799     // try to skip null checks and range checks
 800     NewArray* src_array = src->as_NewArray();
 801     if (src_array != nullptr) {
 802       flags &= ~LIR_OpArrayCopy::src_null_check;
 803       if (length_limit != nullptr &&
 804           src_array->length() == length_limit &&
 805           is_constant_zero(src_pos)) {
 806         flags &= ~LIR_OpArrayCopy::src_range_check;
 807       }
 808     }
 809 
 810     NewArray* dst_array = dst->as_NewArray();
 811     if (dst_array != nullptr) {
 812       flags &= ~LIR_OpArrayCopy::dst_null_check;
 813       if (length_limit != nullptr &&
 814           dst_array->length() == length_limit &&
 815           is_constant_zero(dst_pos)) {
 816         flags &= ~LIR_OpArrayCopy::dst_range_check;
 817       }
 818     }
 819 
 820     // check from incoming constant values
 821     if (positive_constant(src_pos))
 822       flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
 823     if (positive_constant(dst_pos))
 824       flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
 825     if (positive_constant(length))
 826       flags &= ~LIR_OpArrayCopy::length_positive_check;
 827 
 828     // see if the range check can be elided, which might also imply
 829     // that src or dst is non-null.
 830     ArrayLength* al = length->as_ArrayLength();
 831     if (al != nullptr) {
 832       if (al->array() == src) {
 833         // it's the length of the source array
 834         flags &= ~LIR_OpArrayCopy::length_positive_check;
 835         flags &= ~LIR_OpArrayCopy::src_null_check;
 836         if (is_constant_zero(src_pos))
 837           flags &= ~LIR_OpArrayCopy::src_range_check;
 838       }
 839       if (al->array() == dst) {
 840         // it's the length of the destination array
 841         flags &= ~LIR_OpArrayCopy::length_positive_check;
 842         flags &= ~LIR_OpArrayCopy::dst_null_check;
 843         if (is_constant_zero(dst_pos))
 844           flags &= ~LIR_OpArrayCopy::dst_range_check;
 845       }
 846     }
 847     if (is_exact) {
 848       flags &= ~LIR_OpArrayCopy::type_check;
 849     }
 850   }
 851 
 852   IntConstant* src_int = src_pos->type()->as_IntConstant();
 853   IntConstant* dst_int = dst_pos->type()->as_IntConstant();
 854   if (src_int && dst_int) {
 855     int s_offs = src_int->value();
 856     int d_offs = dst_int->value();
 857     if (src_int->value() >= dst_int->value()) {
 858       flags &= ~LIR_OpArrayCopy::overlapping;
 859     }
 860     if (expected_type != nullptr) {
 861       BasicType t = expected_type->element_type()->basic_type();
 862       int element_size = type2aelembytes(t);
 863       if (((arrayOopDesc::base_offset_in_bytes(t) + (uint)s_offs * element_size) % HeapWordSize == 0) &&
 864           ((arrayOopDesc::base_offset_in_bytes(t) + (uint)d_offs * element_size) % HeapWordSize == 0)) {
 865         flags &= ~LIR_OpArrayCopy::unaligned;
 866       }
 867     }
 868   } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
 869     // src and dest positions are the same, or dst is zero so assume
 870     // nonoverlapping copy.
 871     flags &= ~LIR_OpArrayCopy::overlapping;
 872   }
 873 
 874   if (src == dst) {
 875     // moving within a single array so no type checks are needed
 876     if (flags & LIR_OpArrayCopy::type_check) {
 877       flags &= ~LIR_OpArrayCopy::type_check;
 878     }
 879   }
 880   *flagsp = flags;
 881   *expected_typep = (ciArrayKlass*)expected_type;
 882 }
 883 
 884 
 885 LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
 886   assert(opr->is_register(), "why spill if item is not register?");
 887 
 888   if (strict_fp_requires_explicit_rounding) {
 889 #ifdef IA32
 890     if (UseSSE < 1 && opr->is_single_fpu()) {
 891       LIR_Opr result = new_register(T_FLOAT);
 892       set_vreg_flag(result, must_start_in_memory);
 893       assert(opr->is_register(), "only a register can be spilled");
 894       assert(opr->value_type()->is_float(), "rounding only for floats available");
 895       __ roundfp(opr, LIR_OprFact::illegalOpr, result);
 896       return result;
 897     }
 898 #else
 899     Unimplemented();
 900 #endif // IA32
 901   }
 902   return opr;
 903 }
 904 
 905 
 906 LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
 907   assert(type2size[t] == type2size[value->type()],
 908          "size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type()));
 909   if (!value->is_register()) {
 910     // force into a register
 911     LIR_Opr r = new_register(value->type());
 912     __ move(value, r);
 913     value = r;
 914   }
 915 
 916   // create a spill location
 917   LIR_Opr tmp = new_register(t);
 918   set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);
 919 
 920   // move from register to spill
 921   __ move(value, tmp);
 922   return tmp;
 923 }
 924 
 925 void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
 926   if (if_instr->should_profile()) {
 927     ciMethod* method = if_instr->profiled_method();
 928     assert(method != nullptr, "method should be set if branch is profiled");
 929     ciMethodData* md = method->method_data_or_null();
 930     assert(md != nullptr, "Sanity");
 931     ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
 932     assert(data != nullptr, "must have profiling data");
 933     assert(data->is_BranchData(), "need BranchData for two-way branches");
 934     int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
 935     int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
 936     if (if_instr->is_swapped()) {
 937       int t = taken_count_offset;
 938       taken_count_offset = not_taken_count_offset;
 939       not_taken_count_offset = t;
 940     }
 941 
 942     LIR_Opr md_reg = new_register(T_METADATA);
 943     __ metadata2reg(md->constant_encoding(), md_reg);
 944 
 945     LIR_Opr data_offset_reg = new_pointer_register();
 946     __ cmove(lir_cond(cond),
 947              LIR_OprFact::intptrConst(taken_count_offset),
 948              LIR_OprFact::intptrConst(not_taken_count_offset),
 949              data_offset_reg, as_BasicType(if_instr->x()->type()));
 950 
 951     // MDO cells are intptr_t, so the data_reg width is arch-dependent.
 952     LIR_Opr data_reg = new_pointer_register();
 953     LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
 954     __ move(data_addr, data_reg);
 955     // Use leal instead of add to avoid destroying condition codes on x86
 956     LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
 957     __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
 958     __ move(data_reg, data_addr);
 959   }
 960 }
 961 
 962 // Phi technique:
 963 // This is about passing live values from one basic block to the other.
 964 // In code generated with Java it is rather rare that more than one
 965 // value is on the stack from one basic block to the other.
 966 // We optimize our technique for efficient passing of one value
 967 // (of type long, int, double..) but it can be extended.
 968 // When entering or leaving a basic block, all registers and all spill
 969 // slots are release and empty. We use the released registers
 970 // and spill slots to pass the live values from one block
 971 // to the other. The topmost value, i.e., the value on TOS of expression
 972 // stack is passed in registers. All other values are stored in spilling
 973 // area. Every Phi has an index which designates its spill slot
 974 // At exit of a basic block, we fill the register(s) and spill slots.
 975 // At entry of a basic block, the block_prolog sets up the content of phi nodes
 976 // and locks necessary registers and spilling slots.
 977 
 978 
 979 // move current value to referenced phi function
 980 void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
 981   Phi* phi = sux_val->as_Phi();
 982   // cur_val can be null without phi being null in conjunction with inlining
 983   if (phi != nullptr && cur_val != nullptr && cur_val != phi && !phi->is_illegal()) {
 984     if (phi->is_local()) {
 985       for (int i = 0; i < phi->operand_count(); i++) {
 986         Value op = phi->operand_at(i);
 987         if (op != nullptr && op->type()->is_illegal()) {
 988           bailout("illegal phi operand");
 989         }
 990       }
 991     }
 992     Phi* cur_phi = cur_val->as_Phi();
 993     if (cur_phi != nullptr && cur_phi->is_illegal()) {
 994       // Phi and local would need to get invalidated
 995       // (which is unexpected for Linear Scan).
 996       // But this case is very rare so we simply bail out.
 997       bailout("propagation of illegal phi");
 998       return;
 999     }
1000     LIR_Opr operand = cur_val->operand();
1001     if (operand->is_illegal()) {
1002       assert(cur_val->as_Constant() != nullptr || cur_val->as_Local() != nullptr,
1003              "these can be produced lazily");
1004       operand = operand_for_instruction(cur_val);
1005     }
1006     resolver->move(operand, operand_for_instruction(phi));
1007   }
1008 }
1009 
1010 
1011 // Moves all stack values into their PHI position
1012 void LIRGenerator::move_to_phi(ValueStack* cur_state) {
1013   BlockBegin* bb = block();
1014   if (bb->number_of_sux() == 1) {
1015     BlockBegin* sux = bb->sux_at(0);
1016     assert(sux->number_of_preds() > 0, "invalid CFG");
1017 
1018     // a block with only one predecessor never has phi functions
1019     if (sux->number_of_preds() > 1) {
1020       PhiResolver resolver(this);
1021 
1022       ValueStack* sux_state = sux->state();
1023       Value sux_value;
1024       int index;
1025 
1026       assert(cur_state->scope() == sux_state->scope(), "not matching");
1027       assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
1028       assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");
1029 
1030       for_each_stack_value(sux_state, index, sux_value) {
1031         move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
1032       }
1033 
1034       for_each_local_value(sux_state, index, sux_value) {
1035         move_to_phi(&resolver, cur_state->local_at(index), sux_value);
1036       }
1037 
1038       assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
1039     }
1040   }
1041 }
1042 
1043 
1044 LIR_Opr LIRGenerator::new_register(BasicType type) {
1045   int vreg_num = _virtual_register_number;
1046   // Add a little fudge factor for the bailout since the bailout is only checked periodically. This allows us to hand out
1047   // a few extra registers before we really run out which helps to avoid to trip over assertions.
1048   if (vreg_num + 20 >= LIR_Opr::vreg_max) {
1049     bailout("out of virtual registers in LIR generator");
1050     if (vreg_num + 2 >= LIR_Opr::vreg_max) {
1051       // Wrap it around and continue until bailout really happens to avoid hitting assertions.
1052       _virtual_register_number = LIR_Opr::vreg_base;
1053       vreg_num = LIR_Opr::vreg_base;
1054     }
1055   }
1056   _virtual_register_number += 1;
1057   LIR_Opr vreg = LIR_OprFact::virtual_register(vreg_num, type);
1058   assert(vreg != LIR_OprFact::illegal(), "ran out of virtual registers");
1059   return vreg;
1060 }
1061 
1062 
1063 // Try to lock using register in hint
1064 LIR_Opr LIRGenerator::rlock(Value instr) {
1065   return new_register(instr->type());
1066 }
1067 
1068 
1069 // does an rlock and sets result
1070 LIR_Opr LIRGenerator::rlock_result(Value x) {
1071   LIR_Opr reg = rlock(x);
1072   set_result(x, reg);
1073   return reg;
1074 }
1075 
1076 
1077 // does an rlock and sets result
1078 LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
1079   LIR_Opr reg;
1080   switch (type) {
1081   case T_BYTE:
1082   case T_BOOLEAN:
1083     reg = rlock_byte(type);
1084     break;
1085   default:
1086     reg = rlock(x);
1087     break;
1088   }
1089 
1090   set_result(x, reg);
1091   return reg;
1092 }
1093 
1094 
1095 //---------------------------------------------------------------------
1096 ciObject* LIRGenerator::get_jobject_constant(Value value) {
1097   ObjectType* oc = value->type()->as_ObjectType();
1098   if (oc) {
1099     return oc->constant_value();
1100   }
1101   return nullptr;
1102 }
1103 
1104 
1105 void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
1106   assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
1107   assert(block()->next() == x, "ExceptionObject must be first instruction of block");
1108 
1109   // no moves are created for phi functions at the begin of exception
1110   // handlers, so assign operands manually here
1111   for_each_phi_fun(block(), phi,
1112                    if (!phi->is_illegal()) { operand_for_instruction(phi); });
1113 
1114   LIR_Opr thread_reg = getThreadPointer();
1115   __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
1116                exceptionOopOpr());
1117   __ move_wide(LIR_OprFact::oopConst(nullptr),
1118                new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
1119   __ move_wide(LIR_OprFact::oopConst(nullptr),
1120                new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));
1121 
1122   LIR_Opr result = new_register(T_OBJECT);
1123   __ move(exceptionOopOpr(), result);
1124   set_result(x, result);
1125 }
1126 
1127 
1128 //----------------------------------------------------------------------
1129 //----------------------------------------------------------------------
1130 //----------------------------------------------------------------------
1131 //----------------------------------------------------------------------
1132 //                        visitor functions
1133 //----------------------------------------------------------------------
1134 //----------------------------------------------------------------------
1135 //----------------------------------------------------------------------
1136 //----------------------------------------------------------------------
1137 
1138 void LIRGenerator::do_Phi(Phi* x) {
1139   // phi functions are never visited directly
1140   ShouldNotReachHere();
1141 }
1142 
1143 
1144 // Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
1145 void LIRGenerator::do_Constant(Constant* x) {
1146   if (x->state_before() != nullptr) {
1147     // Any constant with a ValueStack requires patching so emit the patch here
1148     LIR_Opr reg = rlock_result(x);
1149     CodeEmitInfo* info = state_for(x, x->state_before());
1150     __ oop2reg_patch(nullptr, reg, info);
1151   } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
1152     if (!x->is_pinned()) {
1153       // unpinned constants are handled specially so that they can be
1154       // put into registers when they are used multiple times within a
1155       // block.  After the block completes their operand will be
1156       // cleared so that other blocks can't refer to that register.
1157       set_result(x, load_constant(x));
1158     } else {
1159       LIR_Opr res = x->operand();
1160       if (!res->is_valid()) {
1161         res = LIR_OprFact::value_type(x->type());
1162       }
1163       if (res->is_constant()) {
1164         LIR_Opr reg = rlock_result(x);
1165         __ move(res, reg);
1166       } else {
1167         set_result(x, res);
1168       }
1169     }
1170   } else {
1171     set_result(x, LIR_OprFact::value_type(x->type()));
1172   }
1173 }
1174 
1175 
1176 void LIRGenerator::do_Local(Local* x) {
1177   // operand_for_instruction has the side effect of setting the result
1178   // so there's no need to do it here.
1179   operand_for_instruction(x);
1180 }
1181 
1182 
1183 void LIRGenerator::do_Return(Return* x) {
1184   if (compilation()->env()->dtrace_method_probes()) {
1185     BasicTypeList signature;
1186     signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
1187     signature.append(T_METADATA); // Method*
1188     LIR_OprList* args = new LIR_OprList();
1189     args->append(getThreadPointer());
1190     LIR_Opr meth = new_register(T_METADATA);
1191     __ metadata2reg(method()->constant_encoding(), meth);
1192     args->append(meth);
1193     call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, nullptr);
1194   }
1195 
1196   if (x->type()->is_void()) {
1197     __ return_op(LIR_OprFact::illegalOpr);
1198   } else {
1199     LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
1200     LIRItem result(x->result(), this);
1201 
1202     result.load_item_force(reg);
1203     __ return_op(result.result());
1204   }
1205   set_no_result(x);
1206 }
1207 
1208 // Example: ref.get()
1209 // Combination of LoadField and g1 pre-write barrier
1210 void LIRGenerator::do_Reference_get(Intrinsic* x) {
1211 
1212   const int referent_offset = java_lang_ref_Reference::referent_offset();
1213 
1214   assert(x->number_of_arguments() == 1, "wrong type");
1215 
1216   LIRItem reference(x->argument_at(0), this);
1217   reference.load_item();
1218 
1219   // need to perform the null check on the reference object
1220   CodeEmitInfo* info = nullptr;
1221   if (x->needs_null_check()) {
1222     info = state_for(x);
1223   }
1224 
1225   LIR_Opr result = rlock_result(x, T_OBJECT);
1226   access_load_at(IN_HEAP | ON_WEAK_OOP_REF, T_OBJECT,
1227                  reference, LIR_OprFact::intConst(referent_offset), result,
1228                  nullptr, info);
1229 }
1230 
1231 // Example: clazz.isInstance(object)
1232 void LIRGenerator::do_isInstance(Intrinsic* x) {
1233   assert(x->number_of_arguments() == 2, "wrong type");
1234 
1235   // TODO could try to substitute this node with an equivalent InstanceOf
1236   // if clazz is known to be a constant Class. This will pick up newly found
1237   // constants after HIR construction. I'll leave this to a future change.
1238 
1239   // as a first cut, make a simple leaf call to runtime to stay platform independent.
1240   // could follow the aastore example in a future change.
1241 
1242   LIRItem clazz(x->argument_at(0), this);
1243   LIRItem object(x->argument_at(1), this);
1244   clazz.load_item();
1245   object.load_item();
1246   LIR_Opr result = rlock_result(x);
1247 
1248   // need to perform null check on clazz
1249   if (x->needs_null_check()) {
1250     CodeEmitInfo* info = state_for(x);
1251     __ null_check(clazz.result(), info);
1252   }
1253 
1254   LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
1255                                      CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
1256                                      x->type(),
1257                                      nullptr); // null CodeEmitInfo results in a leaf call
1258   __ move(call_result, result);
1259 }
1260 
1261 void LIRGenerator::load_klass(LIR_Opr obj, LIR_Opr klass, CodeEmitInfo* null_check_info) {
1262   __ load_klass(obj, klass, null_check_info);
1263 }
1264 
1265 // Example: object.getClass ()
1266 void LIRGenerator::do_getClass(Intrinsic* x) {
1267   assert(x->number_of_arguments() == 1, "wrong type");
1268 
1269   LIRItem rcvr(x->argument_at(0), this);
1270   rcvr.load_item();
1271   LIR_Opr temp = new_register(T_ADDRESS);
1272   LIR_Opr result = rlock_result(x);
1273 
1274   // need to perform the null check on the rcvr
1275   CodeEmitInfo* info = nullptr;
1276   if (x->needs_null_check()) {
1277     info = state_for(x);
1278   }
1279 
1280   LIR_Opr klass = new_register(T_METADATA);
1281   load_klass(rcvr.result(), klass, info);
1282   __ move_wide(new LIR_Address(klass, in_bytes(Klass::java_mirror_offset()), T_ADDRESS), temp);
1283   // mirror = ((OopHandle)mirror)->resolve();
1284   access_load(IN_NATIVE, T_OBJECT,
1285               LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), result);
1286 }
1287 
1288 // java.lang.Class::isPrimitive()
1289 void LIRGenerator::do_isPrimitive(Intrinsic* x) {
1290   assert(x->number_of_arguments() == 1, "wrong type");
1291 
1292   LIRItem rcvr(x->argument_at(0), this);
1293   rcvr.load_item();
1294   LIR_Opr temp = new_register(T_METADATA);
1295   LIR_Opr result = rlock_result(x);
1296 
1297   CodeEmitInfo* info = nullptr;
1298   if (x->needs_null_check()) {
1299     info = state_for(x);
1300   }
1301 
1302   __ move(new LIR_Address(rcvr.result(), java_lang_Class::klass_offset(), T_ADDRESS), temp, info);
1303   __ cmp(lir_cond_notEqual, temp, LIR_OprFact::metadataConst(0));
1304   __ cmove(lir_cond_notEqual, LIR_OprFact::intConst(0), LIR_OprFact::intConst(1), result, T_BOOLEAN);
1305 }
1306 
1307 // Example: Foo.class.getModifiers()
1308 void LIRGenerator::do_getModifiers(Intrinsic* x) {
1309   assert(x->number_of_arguments() == 1, "wrong type");
1310 
1311   LIRItem receiver(x->argument_at(0), this);
1312   receiver.load_item();
1313   LIR_Opr result = rlock_result(x);
1314 
1315   CodeEmitInfo* info = nullptr;
1316   if (x->needs_null_check()) {
1317     info = state_for(x);
1318   }
1319 
1320   // While reading off the universal constant mirror is less efficient than doing
1321   // another branch and returning the constant answer, this branchless code runs into
1322   // much less risk of confusion for C1 register allocator. The choice of the universe
1323   // object here is correct as long as it returns the same modifiers we would expect
1324   // from the primitive class itself. See spec for Class.getModifiers that provides
1325   // the typed array klasses with similar modifiers as their component types.
1326 
1327   Klass* univ_klass = Universe::byteArrayKlass();
1328   assert(univ_klass->modifier_flags() == (JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC), "Sanity");
1329   LIR_Opr prim_klass = LIR_OprFact::metadataConst(univ_klass);
1330 
1331   LIR_Opr recv_klass = new_register(T_METADATA);
1332   __ move(new LIR_Address(receiver.result(), java_lang_Class::klass_offset(), T_ADDRESS), recv_klass, info);
1333 
1334   // Check if this is a Java mirror of primitive type, and select the appropriate klass.
1335   LIR_Opr klass = new_register(T_METADATA);
1336   __ cmp(lir_cond_equal, recv_klass, LIR_OprFact::metadataConst(0));
1337   __ cmove(lir_cond_equal, prim_klass, recv_klass, klass, T_ADDRESS);
1338 
1339   // Get the answer.
1340   __ move(new LIR_Address(klass, in_bytes(Klass::modifier_flags_offset()), T_INT), result);
1341 }
1342 
1343 void LIRGenerator::do_getObjectSize(Intrinsic* x) {
1344   assert(x->number_of_arguments() == 3, "wrong type");
1345   LIR_Opr result_reg = rlock_result(x);
1346 
1347   LIRItem value(x->argument_at(2), this);
1348   value.load_item();
1349 
1350   LIR_Opr klass = new_register(T_METADATA);
1351   load_klass(value.result(), klass, nullptr);
1352   LIR_Opr layout = new_register(T_INT);
1353   __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
1354 
1355   LabelObj* L_done = new LabelObj();
1356   LabelObj* L_array = new LabelObj();
1357 
1358   __ cmp(lir_cond_lessEqual, layout, 0);
1359   __ branch(lir_cond_lessEqual, L_array->label());
1360 
1361   // Instance case: the layout helper gives us instance size almost directly,
1362   // but we need to mask out the _lh_instance_slow_path_bit.
1363 
1364   assert((int) Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
1365 
1366   LIR_Opr mask = load_immediate(~(jint) right_n_bits(LogBytesPerLong), T_INT);
1367   __ logical_and(layout, mask, layout);
1368   __ convert(Bytecodes::_i2l, layout, result_reg);
1369 
1370   __ branch(lir_cond_always, L_done->label());
1371 
1372   // Array case: size is round(header + element_size*arraylength).
1373   // Since arraylength is different for every array instance, we have to
1374   // compute the whole thing at runtime.
1375 
1376   __ branch_destination(L_array->label());
1377 
1378   int round_mask = MinObjAlignmentInBytes - 1;
1379 
1380   // Figure out header sizes first.
1381   LIR_Opr hss = load_immediate(Klass::_lh_header_size_shift, T_INT);
1382   LIR_Opr hsm = load_immediate(Klass::_lh_header_size_mask, T_INT);
1383 
1384   LIR_Opr header_size = new_register(T_INT);
1385   __ move(layout, header_size);
1386   LIR_Opr tmp = new_register(T_INT);
1387   __ unsigned_shift_right(header_size, hss, header_size, tmp);
1388   __ logical_and(header_size, hsm, header_size);
1389   __ add(header_size, LIR_OprFact::intConst(round_mask), header_size);
1390 
1391   // Figure out the array length in bytes
1392   assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
1393   LIR_Opr l2esm = load_immediate(Klass::_lh_log2_element_size_mask, T_INT);
1394   __ logical_and(layout, l2esm, layout);
1395 
1396   LIR_Opr length_int = new_register(T_INT);
1397   __ move(new LIR_Address(value.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), length_int);
1398 
1399 #ifdef _LP64
1400   LIR_Opr length = new_register(T_LONG);
1401   __ convert(Bytecodes::_i2l, length_int, length);
1402 #endif
1403 
1404   // Shift-left awkwardness. Normally it is just:
1405   //   __ shift_left(length, layout, length);
1406   // But C1 cannot perform shift_left with non-constant count, so we end up
1407   // doing the per-bit loop dance here. x86_32 also does not know how to shift
1408   // longs, so we have to act on ints.
1409   LabelObj* L_shift_loop = new LabelObj();
1410   LabelObj* L_shift_exit = new LabelObj();
1411 
1412   __ branch_destination(L_shift_loop->label());
1413   __ cmp(lir_cond_equal, layout, 0);
1414   __ branch(lir_cond_equal, L_shift_exit->label());
1415 
1416 #ifdef _LP64
1417   __ shift_left(length, 1, length);
1418 #else
1419   __ shift_left(length_int, 1, length_int);
1420 #endif
1421 
1422   __ sub(layout, LIR_OprFact::intConst(1), layout);
1423 
1424   __ branch(lir_cond_always, L_shift_loop->label());
1425   __ branch_destination(L_shift_exit->label());
1426 
1427   // Mix all up, round, and push to the result.
1428 #ifdef _LP64
1429   LIR_Opr header_size_long = new_register(T_LONG);
1430   __ convert(Bytecodes::_i2l, header_size, header_size_long);
1431   __ add(length, header_size_long, length);
1432   if (round_mask != 0) {
1433     LIR_Opr round_mask_opr = load_immediate(~(jlong)round_mask, T_LONG);
1434     __ logical_and(length, round_mask_opr, length);
1435   }
1436   __ move(length, result_reg);
1437 #else
1438   __ add(length_int, header_size, length_int);
1439   if (round_mask != 0) {
1440     LIR_Opr round_mask_opr = load_immediate(~round_mask, T_INT);
1441     __ logical_and(length_int, round_mask_opr, length_int);
1442   }
1443   __ convert(Bytecodes::_i2l, length_int, result_reg);
1444 #endif
1445 
1446   __ branch_destination(L_done->label());
1447 }
1448 
1449 void LIRGenerator::do_scopedValueCache(Intrinsic* x) {
1450   do_JavaThreadField(x, JavaThread::scopedValueCache_offset());
1451 }
1452 
1453 // Example: Thread.currentCarrierThread()
1454 void LIRGenerator::do_currentCarrierThread(Intrinsic* x) {
1455   do_JavaThreadField(x, JavaThread::threadObj_offset());
1456 }
1457 
1458 void LIRGenerator::do_vthread(Intrinsic* x) {
1459   do_JavaThreadField(x, JavaThread::vthread_offset());
1460 }
1461 
1462 void LIRGenerator::do_JavaThreadField(Intrinsic* x, ByteSize offset) {
1463   assert(x->number_of_arguments() == 0, "wrong type");
1464   LIR_Opr temp = new_register(T_ADDRESS);
1465   LIR_Opr reg = rlock_result(x);
1466   __ move(new LIR_Address(getThreadPointer(), in_bytes(offset), T_ADDRESS), temp);
1467   access_load(IN_NATIVE, T_OBJECT,
1468               LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), reg);
1469 }
1470 
1471 void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
1472   assert(x->number_of_arguments() == 1, "wrong type");
1473   LIRItem receiver(x->argument_at(0), this);
1474 
1475   receiver.load_item();
1476   BasicTypeList signature;
1477   signature.append(T_OBJECT); // receiver
1478   LIR_OprList* args = new LIR_OprList();
1479   args->append(receiver.result());
1480   CodeEmitInfo* info = state_for(x, x->state());
1481   call_runtime(&signature, args,
1482                CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
1483                voidType, info);
1484 
1485   set_no_result(x);
1486 }
1487 
1488 
1489 //------------------------local access--------------------------------------
1490 
1491 LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
1492   if (x->operand()->is_illegal()) {
1493     Constant* c = x->as_Constant();
1494     if (c != nullptr) {
1495       x->set_operand(LIR_OprFact::value_type(c->type()));
1496     } else {
1497       assert(x->as_Phi() || x->as_Local() != nullptr, "only for Phi and Local");
1498       // allocate a virtual register for this local or phi
1499       x->set_operand(rlock(x));
1500 #ifdef ASSERT
1501       _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, nullptr);
1502 #endif
1503     }
1504   }
1505   return x->operand();
1506 }
1507 
1508 #ifdef ASSERT
1509 Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
1510   if (reg_num < _instruction_for_operand.length()) {
1511     return _instruction_for_operand.at(reg_num);
1512   }
1513   return nullptr;
1514 }
1515 #endif
1516 
1517 void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
1518   if (_vreg_flags.size_in_bits() == 0) {
1519     BitMap2D temp(100, num_vreg_flags);
1520     _vreg_flags = temp;
1521   }
1522   _vreg_flags.at_put_grow(vreg_num, f, true);
1523 }
1524 
1525 bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
1526   if (!_vreg_flags.is_valid_index(vreg_num, f)) {
1527     return false;
1528   }
1529   return _vreg_flags.at(vreg_num, f);
1530 }
1531 
1532 
1533 // Block local constant handling.  This code is useful for keeping
1534 // unpinned constants and constants which aren't exposed in the IR in
1535 // registers.  Unpinned Constant instructions have their operands
1536 // cleared when the block is finished so that other blocks can't end
1537 // up referring to their registers.
1538 
1539 LIR_Opr LIRGenerator::load_constant(Constant* x) {
1540   assert(!x->is_pinned(), "only for unpinned constants");
1541   _unpinned_constants.append(x);
1542   return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
1543 }
1544 
1545 
1546 LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
1547   BasicType t = c->type();
1548   for (int i = 0; i < _constants.length(); i++) {
1549     LIR_Const* other = _constants.at(i);
1550     if (t == other->type()) {
1551       switch (t) {
1552       case T_INT:
1553       case T_FLOAT:
1554         if (c->as_jint_bits() != other->as_jint_bits()) continue;
1555         break;
1556       case T_LONG:
1557       case T_DOUBLE:
1558         if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
1559         if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
1560         break;
1561       case T_OBJECT:
1562         if (c->as_jobject() != other->as_jobject()) continue;
1563         break;
1564       default:
1565         break;
1566       }
1567       return _reg_for_constants.at(i);
1568     }
1569   }
1570 
1571   LIR_Opr result = new_register(t);
1572   __ move((LIR_Opr)c, result);
1573   _constants.append(c);
1574   _reg_for_constants.append(result);
1575   return result;
1576 }
1577 
1578 //------------------------field access--------------------------------------
1579 
1580 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
1581   assert(x->number_of_arguments() == 4, "wrong type");
1582   LIRItem obj   (x->argument_at(0), this);  // object
1583   LIRItem offset(x->argument_at(1), this);  // offset of field
1584   LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
1585   LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
1586   assert(obj.type()->tag() == objectTag, "invalid type");
1587   assert(cmp.type()->tag() == type->tag(), "invalid type");
1588   assert(val.type()->tag() == type->tag(), "invalid type");
1589 
1590   LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
1591                                             obj, offset, cmp, val);
1592   set_result(x, result);
1593 }
1594 
1595 // Comment copied form templateTable_i486.cpp
1596 // ----------------------------------------------------------------------------
1597 // Volatile variables demand their effects be made known to all CPU's in
1598 // order.  Store buffers on most chips allow reads & writes to reorder; the
1599 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1600 // memory barrier (i.e., it's not sufficient that the interpreter does not
1601 // reorder volatile references, the hardware also must not reorder them).
1602 //
1603 // According to the new Java Memory Model (JMM):
1604 // (1) All volatiles are serialized wrt to each other.
1605 // ALSO reads & writes act as acquire & release, so:
1606 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1607 // the read float up to before the read.  It's OK for non-volatile memory refs
1608 // that happen before the volatile read to float down below it.
1609 // (3) Similar a volatile write cannot let unrelated NON-volatile memory refs
1610 // that happen BEFORE the write float down to after the write.  It's OK for
1611 // non-volatile memory refs that happen after the volatile write to float up
1612 // before it.
1613 //
1614 // We only put in barriers around volatile refs (they are expensive), not
1615 // _between_ memory refs (that would require us to track the flavor of the
1616 // previous memory refs).  Requirements (2) and (3) require some barriers
1617 // before volatile stores and after volatile loads.  These nearly cover
1618 // requirement (1) but miss the volatile-store-volatile-load case.  This final
1619 // case is placed after volatile-stores although it could just as well go
1620 // before volatile-loads.
1621 
1622 
1623 void LIRGenerator::do_StoreField(StoreField* x) {
1624   bool needs_patching = x->needs_patching();
1625   bool is_volatile = x->field()->is_volatile();
1626   BasicType field_type = x->field_type();
1627 
1628   CodeEmitInfo* info = nullptr;
1629   if (needs_patching) {
1630     assert(x->explicit_null_check() == nullptr, "can't fold null check into patching field access");
1631     info = state_for(x, x->state_before());
1632   } else if (x->needs_null_check()) {
1633     NullCheck* nc = x->explicit_null_check();
1634     if (nc == nullptr) {
1635       info = state_for(x);
1636     } else {
1637       info = state_for(nc);
1638     }
1639   }
1640 
1641   LIRItem object(x->obj(), this);
1642   LIRItem value(x->value(),  this);
1643 
1644   object.load_item();
1645 
1646   if (is_volatile || needs_patching) {
1647     // load item if field is volatile (fewer special cases for volatiles)
1648     // load item if field not initialized
1649     // load item if field not constant
1650     // because of code patching we cannot inline constants
1651     if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1652       value.load_byte_item();
1653     } else  {
1654       value.load_item();
1655     }
1656   } else {
1657     value.load_for_store(field_type);
1658   }
1659 
1660   set_no_result(x);
1661 
1662 #ifndef PRODUCT
1663   if (PrintNotLoaded && needs_patching) {
1664     tty->print_cr("   ###class not loaded at store_%s bci %d",
1665                   x->is_static() ?  "static" : "field", x->printable_bci());
1666   }
1667 #endif
1668 
1669   if (x->needs_null_check() &&
1670       (needs_patching ||
1671        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1672     // Emit an explicit null check because the offset is too large.
1673     // If the class is not loaded and the object is null, we need to deoptimize to throw a
1674     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1675     __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1676   }
1677 
1678   DecoratorSet decorators = IN_HEAP;
1679   if (is_volatile) {
1680     decorators |= MO_SEQ_CST;
1681   }
1682   if (needs_patching) {
1683     decorators |= C1_NEEDS_PATCHING;
1684   }
1685 
1686   access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
1687                   value.result(), info != nullptr ? new CodeEmitInfo(info) : nullptr, info);
1688 }
1689 
1690 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1691   assert(x->is_pinned(),"");
1692   bool needs_range_check = x->compute_needs_range_check();
1693   bool use_length = x->length() != nullptr;
1694   bool obj_store = is_reference_type(x->elt_type());
1695   bool needs_store_check = obj_store && (x->value()->as_Constant() == nullptr ||
1696                                          !get_jobject_constant(x->value())->is_null_object() ||
1697                                          x->should_profile());
1698 
1699   LIRItem array(x->array(), this);
1700   LIRItem index(x->index(), this);
1701   LIRItem value(x->value(), this);
1702   LIRItem length(this);
1703 
1704   array.load_item();
1705   index.load_nonconstant();
1706 
1707   if (use_length && needs_range_check) {
1708     length.set_instruction(x->length());
1709     length.load_item();
1710 
1711   }
1712   if (needs_store_check || x->check_boolean()) {
1713     value.load_item();
1714   } else {
1715     value.load_for_store(x->elt_type());
1716   }
1717 
1718   set_no_result(x);
1719 
1720   // the CodeEmitInfo must be duplicated for each different
1721   // LIR-instruction because spilling can occur anywhere between two
1722   // instructions and so the debug information must be different
1723   CodeEmitInfo* range_check_info = state_for(x);
1724   CodeEmitInfo* null_check_info = nullptr;
1725   if (x->needs_null_check()) {
1726     null_check_info = new CodeEmitInfo(range_check_info);
1727   }
1728 
1729   if (needs_range_check) {
1730     if (use_length) {
1731       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1732       __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
1733     } else {
1734       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1735       // range_check also does the null check
1736       null_check_info = nullptr;
1737     }
1738   }
1739 
1740   if (GenerateArrayStoreCheck && needs_store_check) {
1741     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1742     array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1743   }
1744 
1745   DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1746   if (x->check_boolean()) {
1747     decorators |= C1_MASK_BOOLEAN;
1748   }
1749 
1750   access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
1751                   nullptr, null_check_info);
1752 }
1753 
1754 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
1755                                   LIRItem& base, LIR_Opr offset, LIR_Opr result,
1756                                   CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
1757   decorators |= ACCESS_READ;
1758   LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
1759   if (access.is_raw()) {
1760     _barrier_set->BarrierSetC1::load_at(access, result);
1761   } else {
1762     _barrier_set->load_at(access, result);
1763   }
1764 }
1765 
1766 void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
1767                                LIR_Opr addr, LIR_Opr result) {
1768   decorators |= ACCESS_READ;
1769   LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
1770   access.set_resolved_addr(addr);
1771   if (access.is_raw()) {
1772     _barrier_set->BarrierSetC1::load(access, result);
1773   } else {
1774     _barrier_set->load(access, result);
1775   }
1776 }
1777 
1778 void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type,
1779                                    LIRItem& base, LIR_Opr offset, LIR_Opr value,
1780                                    CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
1781   decorators |= ACCESS_WRITE;
1782   LIRAccess access(this, decorators, base, offset, type, patch_info, store_emit_info);
1783   if (access.is_raw()) {
1784     _barrier_set->BarrierSetC1::store_at(access, value);
1785   } else {
1786     _barrier_set->store_at(access, value);
1787   }
1788 }
1789 
1790 LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
1791                                                LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
1792   decorators |= ACCESS_READ;
1793   decorators |= ACCESS_WRITE;
1794   // Atomic operations are SEQ_CST by default
1795   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
1796   LIRAccess access(this, decorators, base, offset, type);
1797   if (access.is_raw()) {
1798     return _barrier_set->BarrierSetC1::atomic_cmpxchg_at(access, cmp_value, new_value);
1799   } else {
1800     return _barrier_set->atomic_cmpxchg_at(access, cmp_value, new_value);
1801   }
1802 }
1803 
1804 LIR_Opr LIRGenerator::access_atomic_xchg_at(DecoratorSet decorators, BasicType type,
1805                                             LIRItem& base, LIRItem& offset, LIRItem& value) {
1806   decorators |= ACCESS_READ;
1807   decorators |= ACCESS_WRITE;
1808   // Atomic operations are SEQ_CST by default
1809   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
1810   LIRAccess access(this, decorators, base, offset, type);
1811   if (access.is_raw()) {
1812     return _barrier_set->BarrierSetC1::atomic_xchg_at(access, value);
1813   } else {
1814     return _barrier_set->atomic_xchg_at(access, value);
1815   }
1816 }
1817 
1818 LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type,
1819                                            LIRItem& base, LIRItem& offset, LIRItem& value) {
1820   decorators |= ACCESS_READ;
1821   decorators |= ACCESS_WRITE;
1822   // Atomic operations are SEQ_CST by default
1823   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
1824   LIRAccess access(this, decorators, base, offset, type);
1825   if (access.is_raw()) {
1826     return _barrier_set->BarrierSetC1::atomic_add_at(access, value);
1827   } else {
1828     return _barrier_set->atomic_add_at(access, value);
1829   }
1830 }
1831 
1832 void LIRGenerator::do_LoadField(LoadField* x) {
1833   bool needs_patching = x->needs_patching();
1834   bool is_volatile = x->field()->is_volatile();
1835   BasicType field_type = x->field_type();
1836 
1837   CodeEmitInfo* info = nullptr;
1838   if (needs_patching) {
1839     assert(x->explicit_null_check() == nullptr, "can't fold null check into patching field access");
1840     info = state_for(x, x->state_before());
1841   } else if (x->needs_null_check()) {
1842     NullCheck* nc = x->explicit_null_check();
1843     if (nc == nullptr) {
1844       info = state_for(x);
1845     } else {
1846       info = state_for(nc);
1847     }
1848   }
1849 
1850   LIRItem object(x->obj(), this);
1851 
1852   object.load_item();
1853 
1854 #ifndef PRODUCT
1855   if (PrintNotLoaded && needs_patching) {
1856     tty->print_cr("   ###class not loaded at load_%s bci %d",
1857                   x->is_static() ?  "static" : "field", x->printable_bci());
1858   }
1859 #endif
1860 
1861   bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1862   if (x->needs_null_check() &&
1863       (needs_patching ||
1864        MacroAssembler::needs_explicit_null_check(x->offset()) ||
1865        stress_deopt)) {
1866     LIR_Opr obj = object.result();
1867     if (stress_deopt) {
1868       obj = new_register(T_OBJECT);
1869       __ move(LIR_OprFact::oopConst(nullptr), obj);
1870     }
1871     // Emit an explicit null check because the offset is too large.
1872     // If the class is not loaded and the object is null, we need to deoptimize to throw a
1873     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1874     __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1875   }
1876 
1877   DecoratorSet decorators = IN_HEAP;
1878   if (is_volatile) {
1879     decorators |= MO_SEQ_CST;
1880   }
1881   if (needs_patching) {
1882     decorators |= C1_NEEDS_PATCHING;
1883   }
1884 
1885   LIR_Opr result = rlock_result(x, field_type);
1886   access_load_at(decorators, field_type,
1887                  object, LIR_OprFact::intConst(x->offset()), result,
1888                  info ? new CodeEmitInfo(info) : nullptr, info);
1889 }
1890 
1891 // int/long jdk.internal.util.Preconditions.checkIndex
1892 void LIRGenerator::do_PreconditionsCheckIndex(Intrinsic* x, BasicType type) {
1893   assert(x->number_of_arguments() == 3, "wrong type");
1894   LIRItem index(x->argument_at(0), this);
1895   LIRItem length(x->argument_at(1), this);
1896   LIRItem oobef(x->argument_at(2), this);
1897 
1898   index.load_item();
1899   length.load_item();
1900   oobef.load_item();
1901 
1902   LIR_Opr result = rlock_result(x);
1903   // x->state() is created from copy_state_for_exception, it does not contains arguments
1904   // we should prepare them before entering into interpreter mode due to deoptimization.
1905   ValueStack* state = x->state();
1906   for (int i = 0; i < x->number_of_arguments(); i++) {
1907     Value arg = x->argument_at(i);
1908     state->push(arg->type(), arg);
1909   }
1910   CodeEmitInfo* info = state_for(x, state);
1911 
1912   LIR_Opr len = length.result();
1913   LIR_Opr zero;
1914   if (type == T_INT) {
1915     zero = LIR_OprFact::intConst(0);
1916     if (length.result()->is_constant()){
1917       len = LIR_OprFact::intConst(length.result()->as_jint());
1918     }
1919   } else {
1920     assert(type == T_LONG, "sanity check");
1921     zero = LIR_OprFact::longConst(0);
1922     if (length.result()->is_constant()){
1923       len = LIR_OprFact::longConst(length.result()->as_jlong());
1924     }
1925   }
1926   // C1 can not handle the case that comparing index with constant value while condition
1927   // is neither lir_cond_equal nor lir_cond_notEqual, see LIR_Assembler::comp_op.
1928   LIR_Opr zero_reg = new_register(type);
1929   __ move(zero, zero_reg);
1930 #if defined(X86) && !defined(_LP64)
1931   // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
1932   LIR_Opr index_copy = new_register(index.type());
1933   // index >= 0
1934   __ move(index.result(), index_copy);
1935   __ cmp(lir_cond_less, index_copy, zero_reg);
1936   __ branch(lir_cond_less, new DeoptimizeStub(info, Deoptimization::Reason_range_check,
1937                                                     Deoptimization::Action_make_not_entrant));
1938   // index < length
1939   __ move(index.result(), index_copy);
1940   __ cmp(lir_cond_greaterEqual, index_copy, len);
1941   __ branch(lir_cond_greaterEqual, new DeoptimizeStub(info, Deoptimization::Reason_range_check,
1942                                                             Deoptimization::Action_make_not_entrant));
1943 #else
1944   // index >= 0
1945   __ cmp(lir_cond_less, index.result(), zero_reg);
1946   __ branch(lir_cond_less, new DeoptimizeStub(info, Deoptimization::Reason_range_check,
1947                                                     Deoptimization::Action_make_not_entrant));
1948   // index < length
1949   __ cmp(lir_cond_greaterEqual, index.result(), len);
1950   __ branch(lir_cond_greaterEqual, new DeoptimizeStub(info, Deoptimization::Reason_range_check,
1951                                                             Deoptimization::Action_make_not_entrant));
1952 #endif
1953   __ move(index.result(), result);
1954 }
1955 
1956 //------------------------array access--------------------------------------
1957 
1958 
1959 void LIRGenerator::do_ArrayLength(ArrayLength* x) {
1960   LIRItem array(x->array(), this);
1961   array.load_item();
1962   LIR_Opr reg = rlock_result(x);
1963 
1964   CodeEmitInfo* info = nullptr;
1965   if (x->needs_null_check()) {
1966     NullCheck* nc = x->explicit_null_check();
1967     if (nc == nullptr) {
1968       info = state_for(x);
1969     } else {
1970       info = state_for(nc);
1971     }
1972     if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
1973       LIR_Opr obj = new_register(T_OBJECT);
1974       __ move(LIR_OprFact::oopConst(nullptr), obj);
1975       __ null_check(obj, new CodeEmitInfo(info));
1976     }
1977   }
1978   __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
1979 }
1980 
1981 
1982 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
1983   bool use_length = x->length() != nullptr;
1984   LIRItem array(x->array(), this);
1985   LIRItem index(x->index(), this);
1986   LIRItem length(this);
1987   bool needs_range_check = x->compute_needs_range_check();
1988 
1989   if (use_length && needs_range_check) {
1990     length.set_instruction(x->length());
1991     length.load_item();
1992   }
1993 
1994   array.load_item();
1995   if (index.is_constant() && can_inline_as_constant(x->index())) {
1996     // let it be a constant
1997     index.dont_load_item();
1998   } else {
1999     index.load_item();
2000   }
2001 
2002   CodeEmitInfo* range_check_info = state_for(x);
2003   CodeEmitInfo* null_check_info = nullptr;
2004   if (x->needs_null_check()) {
2005     NullCheck* nc = x->explicit_null_check();
2006     if (nc != nullptr) {
2007       null_check_info = state_for(nc);
2008     } else {
2009       null_check_info = range_check_info;
2010     }
2011     if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
2012       LIR_Opr obj = new_register(T_OBJECT);
2013       __ move(LIR_OprFact::oopConst(nullptr), obj);
2014       __ null_check(obj, new CodeEmitInfo(null_check_info));
2015     }
2016   }
2017 
2018   if (needs_range_check) {
2019     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
2020       __ branch(lir_cond_always, new RangeCheckStub(range_check_info, index.result(), array.result()));
2021     } else if (use_length) {
2022       // TODO: use a (modified) version of array_range_check that does not require a
2023       //       constant length to be loaded to a register
2024       __ cmp(lir_cond_belowEqual, length.result(), index.result());
2025       __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
2026     } else {
2027       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
2028       // The range check performs the null check, so clear it out for the load
2029       null_check_info = nullptr;
2030     }
2031   }
2032 
2033   DecoratorSet decorators = IN_HEAP | IS_ARRAY;
2034 
2035   LIR_Opr result = rlock_result(x, x->elt_type());
2036   access_load_at(decorators, x->elt_type(),
2037                  array, index.result(), result,
2038                  nullptr, null_check_info);
2039 }
2040 
2041 
2042 void LIRGenerator::do_NullCheck(NullCheck* x) {
2043   if (x->can_trap()) {
2044     LIRItem value(x->obj(), this);
2045     value.load_item();
2046     CodeEmitInfo* info = state_for(x);
2047     __ null_check(value.result(), info);
2048   }
2049 }
2050 
2051 
2052 void LIRGenerator::do_TypeCast(TypeCast* x) {
2053   LIRItem value(x->obj(), this);
2054   value.load_item();
2055   // the result is the same as from the node we are casting
2056   set_result(x, value.result());
2057 }
2058 
2059 
2060 void LIRGenerator::do_Throw(Throw* x) {
2061   LIRItem exception(x->exception(), this);
2062   exception.load_item();
2063   set_no_result(x);
2064   LIR_Opr exception_opr = exception.result();
2065   CodeEmitInfo* info = state_for(x, x->state());
2066 
2067 #ifndef PRODUCT
2068   if (PrintC1Statistics) {
2069     increment_counter(Runtime1::throw_count_address(), T_INT);
2070   }
2071 #endif
2072 
2073   // check if the instruction has an xhandler in any of the nested scopes
2074   bool unwind = false;
2075   if (info->exception_handlers()->length() == 0) {
2076     // this throw is not inside an xhandler
2077     unwind = true;
2078   } else {
2079     // get some idea of the throw type
2080     bool type_is_exact = true;
2081     ciType* throw_type = x->exception()->exact_type();
2082     if (throw_type == nullptr) {
2083       type_is_exact = false;
2084       throw_type = x->exception()->declared_type();
2085     }
2086     if (throw_type != nullptr && throw_type->is_instance_klass()) {
2087       ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
2088       unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
2089     }
2090   }
2091 
2092   // do null check before moving exception oop into fixed register
2093   // to avoid a fixed interval with an oop during the null check.
2094   // Use a copy of the CodeEmitInfo because debug information is
2095   // different for null_check and throw.
2096   if (x->exception()->as_NewInstance() == nullptr && x->exception()->as_ExceptionObject() == nullptr) {
2097     // if the exception object wasn't created using new then it might be null.
2098     __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
2099   }
2100 
2101   if (compilation()->env()->jvmti_can_post_on_exceptions()) {
2102     // we need to go through the exception lookup path to get JVMTI
2103     // notification done
2104     unwind = false;
2105   }
2106 
2107   // move exception oop into fixed register
2108   __ move(exception_opr, exceptionOopOpr());
2109 
2110   if (unwind) {
2111     __ unwind_exception(exceptionOopOpr());
2112   } else {
2113     __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
2114   }
2115 }
2116 
2117 
2118 void LIRGenerator::do_RoundFP(RoundFP* x) {
2119   assert(strict_fp_requires_explicit_rounding, "not required");
2120 
2121   LIRItem input(x->input(), this);
2122   input.load_item();
2123   LIR_Opr input_opr = input.result();
2124   assert(input_opr->is_register(), "why round if value is not in a register?");
2125   assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
2126   if (input_opr->is_single_fpu()) {
2127     set_result(x, round_item(input_opr)); // This code path not currently taken
2128   } else {
2129     LIR_Opr result = new_register(T_DOUBLE);
2130     set_vreg_flag(result, must_start_in_memory);
2131     __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
2132     set_result(x, result);
2133   }
2134 }
2135 
2136 
2137 void LIRGenerator::do_UnsafeGet(UnsafeGet* x) {
2138   BasicType type = x->basic_type();
2139   LIRItem src(x->object(), this);
2140   LIRItem off(x->offset(), this);
2141 
2142   off.load_item();
2143   src.load_item();
2144 
2145   DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS;
2146 
2147   if (x->is_volatile()) {
2148     decorators |= MO_SEQ_CST;
2149   }
2150   if (type == T_BOOLEAN) {
2151     decorators |= C1_MASK_BOOLEAN;
2152   }
2153   if (is_reference_type(type)) {
2154     decorators |= ON_UNKNOWN_OOP_REF;
2155   }
2156 
2157   LIR_Opr result = rlock_result(x, type);
2158   if (!x->is_raw()) {
2159     access_load_at(decorators, type, src, off.result(), result);
2160   } else {
2161     // Currently it is only used in GraphBuilder::setup_osr_entry_block.
2162     // It reads the value from [src + offset] directly.
2163 #ifdef _LP64
2164     LIR_Opr offset = new_register(T_LONG);
2165     __ convert(Bytecodes::_i2l, off.result(), offset);
2166 #else
2167     LIR_Opr offset = off.result();
2168 #endif
2169     LIR_Address* addr = new LIR_Address(src.result(), offset, type);
2170     if (is_reference_type(type)) {
2171       __ move_wide(addr, result);
2172     } else {
2173       __ move(addr, result);
2174     }
2175   }
2176 }
2177 
2178 
2179 void LIRGenerator::do_UnsafePut(UnsafePut* x) {
2180   BasicType type = x->basic_type();
2181   LIRItem src(x->object(), this);
2182   LIRItem off(x->offset(), this);
2183   LIRItem data(x->value(), this);
2184 
2185   src.load_item();
2186   if (type == T_BOOLEAN || type == T_BYTE) {
2187     data.load_byte_item();
2188   } else {
2189     data.load_item();
2190   }
2191   off.load_item();
2192 
2193   set_no_result(x);
2194 
2195   DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS;
2196   if (is_reference_type(type)) {
2197     decorators |= ON_UNKNOWN_OOP_REF;
2198   }
2199   if (x->is_volatile()) {
2200     decorators |= MO_SEQ_CST;
2201   }
2202   access_store_at(decorators, type, src, off.result(), data.result());
2203 }
2204 
2205 void LIRGenerator::do_UnsafeGetAndSet(UnsafeGetAndSet* x) {
2206   BasicType type = x->basic_type();
2207   LIRItem src(x->object(), this);
2208   LIRItem off(x->offset(), this);
2209   LIRItem value(x->value(), this);
2210 
2211   DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS | MO_SEQ_CST;
2212 
2213   if (is_reference_type(type)) {
2214     decorators |= ON_UNKNOWN_OOP_REF;
2215   }
2216 
2217   LIR_Opr result;
2218   if (x->is_add()) {
2219     result = access_atomic_add_at(decorators, type, src, off, value);
2220   } else {
2221     result = access_atomic_xchg_at(decorators, type, src, off, value);
2222   }
2223   set_result(x, result);
2224 }
2225 
2226 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2227   int lng = x->length();
2228 
2229   for (int i = 0; i < lng; i++) {
2230     C1SwitchRange* one_range = x->at(i);
2231     int low_key = one_range->low_key();
2232     int high_key = one_range->high_key();
2233     BlockBegin* dest = one_range->sux();
2234     if (low_key == high_key) {
2235       __ cmp(lir_cond_equal, value, low_key);
2236       __ branch(lir_cond_equal, dest);
2237     } else if (high_key - low_key == 1) {
2238       __ cmp(lir_cond_equal, value, low_key);
2239       __ branch(lir_cond_equal, dest);
2240       __ cmp(lir_cond_equal, value, high_key);
2241       __ branch(lir_cond_equal, dest);
2242     } else {
2243       LabelObj* L = new LabelObj();
2244       __ cmp(lir_cond_less, value, low_key);
2245       __ branch(lir_cond_less, L->label());
2246       __ cmp(lir_cond_lessEqual, value, high_key);
2247       __ branch(lir_cond_lessEqual, dest);
2248       __ branch_destination(L->label());
2249     }
2250   }
2251   __ jump(default_sux);
2252 }
2253 
2254 
2255 SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
2256   SwitchRangeList* res = new SwitchRangeList();
2257   int len = x->length();
2258   if (len > 0) {
2259     BlockBegin* sux = x->sux_at(0);
2260     int low = x->lo_key();
2261     BlockBegin* default_sux = x->default_sux();
2262     C1SwitchRange* range = new C1SwitchRange(low, sux);
2263     for (int i = 0; i < len; i++) {
2264       int key = low + i;
2265       BlockBegin* new_sux = x->sux_at(i);
2266       if (sux == new_sux) {
2267         // still in same range
2268         range->set_high_key(key);
2269       } else {
2270         // skip tests which explicitly dispatch to the default
2271         if (sux != default_sux) {
2272           res->append(range);
2273         }
2274         range = new C1SwitchRange(key, new_sux);
2275       }
2276       sux = new_sux;
2277     }
2278     if (res->length() == 0 || res->last() != range)  res->append(range);
2279   }
2280   return res;
2281 }
2282 
2283 
2284 // we expect the keys to be sorted by increasing value
2285 SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2286   SwitchRangeList* res = new SwitchRangeList();
2287   int len = x->length();
2288   if (len > 0) {
2289     BlockBegin* default_sux = x->default_sux();
2290     int key = x->key_at(0);
2291     BlockBegin* sux = x->sux_at(0);
2292     C1SwitchRange* range = new C1SwitchRange(key, sux);
2293     for (int i = 1; i < len; i++) {
2294       int new_key = x->key_at(i);
2295       BlockBegin* new_sux = x->sux_at(i);
2296       if (key+1 == new_key && sux == new_sux) {
2297         // still in same range
2298         range->set_high_key(new_key);
2299       } else {
2300         // skip tests which explicitly dispatch to the default
2301         if (range->sux() != default_sux) {
2302           res->append(range);
2303         }
2304         range = new C1SwitchRange(new_key, new_sux);
2305       }
2306       key = new_key;
2307       sux = new_sux;
2308     }
2309     if (res->length() == 0 || res->last() != range)  res->append(range);
2310   }
2311   return res;
2312 }
2313 
2314 
2315 void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2316   LIRItem tag(x->tag(), this);
2317   tag.load_item();
2318   set_no_result(x);
2319 
2320   if (x->is_safepoint()) {
2321     __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2322   }
2323 
2324   // move values into phi locations
2325   move_to_phi(x->state());
2326 
2327   int lo_key = x->lo_key();
2328   int len = x->length();
2329   assert(lo_key <= (lo_key + (len - 1)), "integer overflow");
2330   LIR_Opr value = tag.result();
2331 
2332   if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) {
2333     ciMethod* method = x->state()->scope()->method();
2334     ciMethodData* md = method->method_data_or_null();
2335     assert(md != nullptr, "Sanity");
2336     ciProfileData* data = md->bci_to_data(x->state()->bci());
2337     assert(data != nullptr, "must have profiling data");
2338     assert(data->is_MultiBranchData(), "bad profile data?");
2339     int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset());
2340     LIR_Opr md_reg = new_register(T_METADATA);
2341     __ metadata2reg(md->constant_encoding(), md_reg);
2342     LIR_Opr data_offset_reg = new_pointer_register();
2343     LIR_Opr tmp_reg = new_pointer_register();
2344 
2345     __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg);
2346     for (int i = 0; i < len; i++) {
2347       int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i));
2348       __ cmp(lir_cond_equal, value, i + lo_key);
2349       __ move(data_offset_reg, tmp_reg);
2350       __ cmove(lir_cond_equal,
2351                LIR_OprFact::intptrConst(count_offset),
2352                tmp_reg,
2353                data_offset_reg, T_INT);
2354     }
2355 
2356     LIR_Opr data_reg = new_pointer_register();
2357     LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
2358     __ move(data_addr, data_reg);
2359     __ add(data_reg, LIR_OprFact::intptrConst(1), data_reg);
2360     __ move(data_reg, data_addr);
2361   }
2362 
2363   if (UseTableRanges) {
2364     do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2365   } else {
2366     for (int i = 0; i < len; i++) {
2367       __ cmp(lir_cond_equal, value, i + lo_key);
2368       __ branch(lir_cond_equal, x->sux_at(i));
2369     }
2370     __ jump(x->default_sux());
2371   }
2372 }
2373 
2374 
2375 void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2376   LIRItem tag(x->tag(), this);
2377   tag.load_item();
2378   set_no_result(x);
2379 
2380   if (x->is_safepoint()) {
2381     __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2382   }
2383 
2384   // move values into phi locations
2385   move_to_phi(x->state());
2386 
2387   LIR_Opr value = tag.result();
2388   int len = x->length();
2389 
2390   if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) {
2391     ciMethod* method = x->state()->scope()->method();
2392     ciMethodData* md = method->method_data_or_null();
2393     assert(md != nullptr, "Sanity");
2394     ciProfileData* data = md->bci_to_data(x->state()->bci());
2395     assert(data != nullptr, "must have profiling data");
2396     assert(data->is_MultiBranchData(), "bad profile data?");
2397     int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset());
2398     LIR_Opr md_reg = new_register(T_METADATA);
2399     __ metadata2reg(md->constant_encoding(), md_reg);
2400     LIR_Opr data_offset_reg = new_pointer_register();
2401     LIR_Opr tmp_reg = new_pointer_register();
2402 
2403     __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg);
2404     for (int i = 0; i < len; i++) {
2405       int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i));
2406       __ cmp(lir_cond_equal, value, x->key_at(i));
2407       __ move(data_offset_reg, tmp_reg);
2408       __ cmove(lir_cond_equal,
2409                LIR_OprFact::intptrConst(count_offset),
2410                tmp_reg,
2411                data_offset_reg, T_INT);
2412     }
2413 
2414     LIR_Opr data_reg = new_pointer_register();
2415     LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
2416     __ move(data_addr, data_reg);
2417     __ add(data_reg, LIR_OprFact::intptrConst(1), data_reg);
2418     __ move(data_reg, data_addr);
2419   }
2420 
2421   if (UseTableRanges) {
2422     do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2423   } else {
2424     int len = x->length();
2425     for (int i = 0; i < len; i++) {
2426       __ cmp(lir_cond_equal, value, x->key_at(i));
2427       __ branch(lir_cond_equal, x->sux_at(i));
2428     }
2429     __ jump(x->default_sux());
2430   }
2431 }
2432 
2433 
2434 void LIRGenerator::do_Goto(Goto* x) {
2435   set_no_result(x);
2436 
2437   if (block()->next()->as_OsrEntry()) {
2438     // need to free up storage used for OSR entry point
2439     LIR_Opr osrBuffer = block()->next()->operand();
2440     BasicTypeList signature;
2441     signature.append(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); // pass a pointer to osrBuffer
2442     CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2443     __ move(osrBuffer, cc->args()->at(0));
2444     __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
2445                          getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2446   }
2447 
2448   if (x->is_safepoint()) {
2449     ValueStack* state = x->state_before() ? x->state_before() : x->state();
2450 
2451     // increment backedge counter if needed
2452     CodeEmitInfo* info = state_for(x, state);
2453     increment_backedge_counter(info, x->profiled_bci());
2454     CodeEmitInfo* safepoint_info = state_for(x, state);
2455     __ safepoint(safepoint_poll_register(), safepoint_info);
2456   }
2457 
2458   // Gotos can be folded Ifs, handle this case.
2459   if (x->should_profile()) {
2460     ciMethod* method = x->profiled_method();
2461     assert(method != nullptr, "method should be set if branch is profiled");
2462     ciMethodData* md = method->method_data_or_null();
2463     assert(md != nullptr, "Sanity");
2464     ciProfileData* data = md->bci_to_data(x->profiled_bci());
2465     assert(data != nullptr, "must have profiling data");
2466     int offset;
2467     if (x->direction() == Goto::taken) {
2468       assert(data->is_BranchData(), "need BranchData for two-way branches");
2469       offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
2470     } else if (x->direction() == Goto::not_taken) {
2471       assert(data->is_BranchData(), "need BranchData for two-way branches");
2472       offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
2473     } else {
2474       assert(data->is_JumpData(), "need JumpData for branches");
2475       offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
2476     }
2477     LIR_Opr md_reg = new_register(T_METADATA);
2478     __ metadata2reg(md->constant_encoding(), md_reg);
2479 
2480     increment_counter(new LIR_Address(md_reg, offset,
2481                                       NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
2482   }
2483 
2484   // emit phi-instruction move after safepoint since this simplifies
2485   // describing the state as the safepoint.
2486   move_to_phi(x->state());
2487 
2488   __ jump(x->default_sux());
2489 }
2490 
2491 /**
2492  * Emit profiling code if needed for arguments, parameters, return value types
2493  *
2494  * @param md                    MDO the code will update at runtime
2495  * @param md_base_offset        common offset in the MDO for this profile and subsequent ones
2496  * @param md_offset             offset in the MDO (on top of md_base_offset) for this profile
2497  * @param profiled_k            current profile
2498  * @param obj                   IR node for the object to be profiled
2499  * @param mdp                   register to hold the pointer inside the MDO (md + md_base_offset).
2500  *                              Set once we find an update to make and use for next ones.
2501  * @param not_null              true if we know obj cannot be null
2502  * @param signature_at_call_k   signature at call for obj
2503  * @param callee_signature_k    signature of callee for obj
2504  *                              at call and callee signatures differ at method handle call
2505  * @return                      the only klass we know will ever be seen at this profile point
2506  */
2507 ciKlass* LIRGenerator::profile_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k,
2508                                     Value obj, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
2509                                     ciKlass* callee_signature_k) {
2510   ciKlass* result = nullptr;
2511   bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
2512   bool do_update = !TypeEntries::is_type_unknown(profiled_k);
2513   // known not to be null or null bit already set and already set to
2514   // unknown: nothing we can do to improve profiling
2515   if (!do_null && !do_update) {
2516     return result;
2517   }
2518 
2519   ciKlass* exact_klass = nullptr;
2520   Compilation* comp = Compilation::current();
2521   if (do_update) {
2522     // try to find exact type, using CHA if possible, so that loading
2523     // the klass from the object can be avoided
2524     ciType* type = obj->exact_type();
2525     if (type == nullptr) {
2526       type = obj->declared_type();
2527       type = comp->cha_exact_type(type);
2528     }
2529     assert(type == nullptr || type->is_klass(), "type should be class");
2530     exact_klass = (type != nullptr && type->is_loaded()) ? (ciKlass*)type : nullptr;
2531 
2532     do_update = exact_klass == nullptr || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2533   }
2534 
2535   if (!do_null && !do_update) {
2536     return result;
2537   }
2538 
2539   ciKlass* exact_signature_k = nullptr;
2540   if (do_update) {
2541     // Is the type from the signature exact (the only one possible)?
2542     exact_signature_k = signature_at_call_k->exact_klass();
2543     if (exact_signature_k == nullptr) {
2544       exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2545     } else {
2546       result = exact_signature_k;
2547       // Known statically. No need to emit any code: prevent
2548       // LIR_Assembler::emit_profile_type() from emitting useless code
2549       profiled_k = ciTypeEntries::with_status(result, profiled_k);
2550     }
2551     // exact_klass and exact_signature_k can be both non null but
2552     // different if exact_klass is loaded after the ciObject for
2553     // exact_signature_k is created.
2554     if (exact_klass == nullptr && exact_signature_k != nullptr && exact_klass != exact_signature_k) {
2555       // sometimes the type of the signature is better than the best type
2556       // the compiler has
2557       exact_klass = exact_signature_k;
2558     }
2559     if (callee_signature_k != nullptr &&
2560         callee_signature_k != signature_at_call_k) {
2561       ciKlass* improved_klass = callee_signature_k->exact_klass();
2562       if (improved_klass == nullptr) {
2563         improved_klass = comp->cha_exact_type(callee_signature_k);
2564       }
2565       if (exact_klass == nullptr && improved_klass != nullptr && exact_klass != improved_klass) {
2566         exact_klass = exact_signature_k;
2567       }
2568     }
2569     do_update = exact_klass == nullptr || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2570   }
2571 
2572   if (!do_null && !do_update) {
2573     return result;
2574   }
2575 
2576   if (mdp == LIR_OprFact::illegalOpr) {
2577     mdp = new_register(T_METADATA);
2578     __ metadata2reg(md->constant_encoding(), mdp);
2579     if (md_base_offset != 0) {
2580       LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
2581       mdp = new_pointer_register();
2582       __ leal(LIR_OprFact::address(base_type_address), mdp);
2583     }
2584   }
2585   LIRItem value(obj, this);
2586   value.load_item();
2587   __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
2588                   value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != nullptr);
2589   return result;
2590 }
2591 
2592 // profile parameters on entry to the root of the compilation
2593 void LIRGenerator::profile_parameters(Base* x) {
2594   if (compilation()->profile_parameters()) {
2595     CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2596     ciMethodData* md = scope()->method()->method_data_or_null();
2597     assert(md != nullptr, "Sanity");
2598 
2599     if (md->parameters_type_data() != nullptr) {
2600       ciParametersTypeData* parameters_type_data = md->parameters_type_data();
2601       ciTypeStackSlotEntries* parameters =  parameters_type_data->parameters();
2602       LIR_Opr mdp = LIR_OprFact::illegalOpr;
2603       for (int java_index = 0, i = 0, j = 0; j < parameters_type_data->number_of_parameters(); i++) {
2604         LIR_Opr src = args->at(i);
2605         assert(!src->is_illegal(), "check");
2606         BasicType t = src->type();
2607         if (is_reference_type(t)) {
2608           intptr_t profiled_k = parameters->type(j);
2609           Local* local = x->state()->local_at(java_index)->as_Local();
2610           ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
2611                                         in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
2612                                         profiled_k, local, mdp, false, local->declared_type()->as_klass(), nullptr);
2613           // If the profile is known statically set it once for all and do not emit any code
2614           if (exact != nullptr) {
2615             md->set_parameter_type(j, exact);
2616           }
2617           j++;
2618         }
2619         java_index += type2size[t];
2620       }
2621     }
2622   }
2623 }
2624 
2625 void LIRGenerator::do_Base(Base* x) {
2626   __ std_entry(LIR_OprFact::illegalOpr);
2627   // Emit moves from physical registers / stack slots to virtual registers
2628   CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2629   IRScope* irScope = compilation()->hir()->top_scope();
2630   int java_index = 0;
2631   for (int i = 0; i < args->length(); i++) {
2632     LIR_Opr src = args->at(i);
2633     assert(!src->is_illegal(), "check");
2634     BasicType t = src->type();
2635 
2636     // Types which are smaller than int are passed as int, so
2637     // correct the type which passed.
2638     switch (t) {
2639     case T_BYTE:
2640     case T_BOOLEAN:
2641     case T_SHORT:
2642     case T_CHAR:
2643       t = T_INT;
2644       break;
2645     default:
2646       break;
2647     }
2648 
2649     LIR_Opr dest = new_register(t);
2650     __ move(src, dest);
2651 
2652     // Assign new location to Local instruction for this local
2653     Local* local = x->state()->local_at(java_index)->as_Local();
2654     assert(local != nullptr, "Locals for incoming arguments must have been created");
2655 #ifndef __SOFTFP__
2656     // The java calling convention passes double as long and float as int.
2657     assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
2658 #endif // __SOFTFP__
2659     local->set_operand(dest);
2660 #ifdef ASSERT
2661     _instruction_for_operand.at_put_grow(dest->vreg_number(), local, nullptr);
2662 #endif
2663     java_index += type2size[t];
2664   }
2665 
2666   if (compilation()->env()->dtrace_method_probes()) {
2667     BasicTypeList signature;
2668     signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
2669     signature.append(T_METADATA); // Method*
2670     LIR_OprList* args = new LIR_OprList();
2671     args->append(getThreadPointer());
2672     LIR_Opr meth = new_register(T_METADATA);
2673     __ metadata2reg(method()->constant_encoding(), meth);
2674     args->append(meth);
2675     call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, nullptr);
2676   }
2677 
2678   if (method()->is_synchronized()) {
2679     LIR_Opr obj;
2680     if (method()->is_static()) {
2681       obj = new_register(T_OBJECT);
2682       __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2683     } else {
2684       Local* receiver = x->state()->local_at(0)->as_Local();
2685       assert(receiver != nullptr, "must already exist");
2686       obj = receiver->operand();
2687     }
2688     assert(obj->is_valid(), "must be valid");
2689 
2690     if (method()->is_synchronized() && GenerateSynchronizationCode) {
2691       LIR_Opr lock = syncLockOpr();
2692       __ load_stack_address_monitor(0, lock);
2693 
2694       CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, x->check_flag(Instruction::DeoptimizeOnException));
2695       CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2696 
2697       // receiver is guaranteed non-null so don't need CodeEmitInfo
2698       __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, nullptr);
2699     }
2700   }
2701   // increment invocation counters if needed
2702   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2703     profile_parameters(x);
2704     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, false);
2705     increment_invocation_counter(info);
2706   }
2707 
2708   // all blocks with a successor must end with an unconditional jump
2709   // to the successor even if they are consecutive
2710   __ jump(x->default_sux());
2711 }
2712 
2713 
2714 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2715   // construct our frame and model the production of incoming pointer
2716   // to the OSR buffer.
2717   __ osr_entry(LIR_Assembler::osrBufferPointer());
2718   LIR_Opr result = rlock_result(x);
2719   __ move(LIR_Assembler::osrBufferPointer(), result);
2720 }
2721 
2722 
2723 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2724   assert(args->length() == arg_list->length(),
2725          "args=%d, arg_list=%d", args->length(), arg_list->length());
2726   for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
2727     LIRItem* param = args->at(i);
2728     LIR_Opr loc = arg_list->at(i);
2729     if (loc->is_register()) {
2730       param->load_item_force(loc);
2731     } else {
2732       LIR_Address* addr = loc->as_address_ptr();
2733       param->load_for_store(addr->type());
2734       if (addr->type() == T_OBJECT) {
2735         __ move_wide(param->result(), addr);
2736       } else
2737         __ move(param->result(), addr);
2738     }
2739   }
2740 
2741   if (x->has_receiver()) {
2742     LIRItem* receiver = args->at(0);
2743     LIR_Opr loc = arg_list->at(0);
2744     if (loc->is_register()) {
2745       receiver->load_item_force(loc);
2746     } else {
2747       assert(loc->is_address(), "just checking");
2748       receiver->load_for_store(T_OBJECT);
2749       __ move_wide(receiver->result(), loc->as_address_ptr());
2750     }
2751   }
2752 }
2753 
2754 
2755 // Visits all arguments, returns appropriate items without loading them
2756 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2757   LIRItemList* argument_items = new LIRItemList();
2758   if (x->has_receiver()) {
2759     LIRItem* receiver = new LIRItem(x->receiver(), this);
2760     argument_items->append(receiver);
2761   }
2762   for (int i = 0; i < x->number_of_arguments(); i++) {
2763     LIRItem* param = new LIRItem(x->argument_at(i), this);
2764     argument_items->append(param);
2765   }
2766   return argument_items;
2767 }
2768 
2769 
2770 // The invoke with receiver has following phases:
2771 //   a) traverse and load/lock receiver;
2772 //   b) traverse all arguments -> item-array (invoke_visit_argument)
2773 //   c) push receiver on stack
2774 //   d) load each of the items and push on stack
2775 //   e) unlock receiver
2776 //   f) move receiver into receiver-register %o0
2777 //   g) lock result registers and emit call operation
2778 //
2779 // Before issuing a call, we must spill-save all values on stack
2780 // that are in caller-save register. "spill-save" moves those registers
2781 // either in a free callee-save register or spills them if no free
2782 // callee save register is available.
2783 //
2784 // The problem is where to invoke spill-save.
2785 // - if invoked between e) and f), we may lock callee save
2786 //   register in "spill-save" that destroys the receiver register
2787 //   before f) is executed
2788 // - if we rearrange f) to be earlier (by loading %o0) it
2789 //   may destroy a value on the stack that is currently in %o0
2790 //   and is waiting to be spilled
2791 // - if we keep the receiver locked while doing spill-save,
2792 //   we cannot spill it as it is spill-locked
2793 //
2794 void LIRGenerator::do_Invoke(Invoke* x) {
2795   CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
2796 
2797   LIR_OprList* arg_list = cc->args();
2798   LIRItemList* args = invoke_visit_arguments(x);
2799   LIR_Opr receiver = LIR_OprFact::illegalOpr;
2800 
2801   // setup result register
2802   LIR_Opr result_register = LIR_OprFact::illegalOpr;
2803   if (x->type() != voidType) {
2804     result_register = result_register_for(x->type());
2805   }
2806 
2807   CodeEmitInfo* info = state_for(x, x->state());
2808 
2809   invoke_load_arguments(x, args, arg_list);
2810 
2811   if (x->has_receiver()) {
2812     args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2813     receiver = args->at(0)->result();
2814   }
2815 
2816   // emit invoke code
2817   assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2818 
2819   // JSR 292
2820   // Preserve the SP over MethodHandle call sites, if needed.
2821   ciMethod* target = x->target();
2822   bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
2823                                   target->is_method_handle_intrinsic() ||
2824                                   target->is_compiled_lambda_form());
2825   if (is_method_handle_invoke) {
2826     info->set_is_method_handle_invoke(true);
2827     if(FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
2828         __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
2829     }
2830   }
2831 
2832   switch (x->code()) {
2833     case Bytecodes::_invokestatic:
2834       __ call_static(target, result_register,
2835                      SharedRuntime::get_resolve_static_call_stub(),
2836                      arg_list, info);
2837       break;
2838     case Bytecodes::_invokespecial:
2839     case Bytecodes::_invokevirtual:
2840     case Bytecodes::_invokeinterface:
2841       // for loaded and final (method or class) target we still produce an inline cache,
2842       // in order to be able to call mixed mode
2843       if (x->code() == Bytecodes::_invokespecial || x->target_is_final()) {
2844         __ call_opt_virtual(target, receiver, result_register,
2845                             SharedRuntime::get_resolve_opt_virtual_call_stub(),
2846                             arg_list, info);
2847       } else {
2848         __ call_icvirtual(target, receiver, result_register,
2849                           SharedRuntime::get_resolve_virtual_call_stub(),
2850                           arg_list, info);
2851       }
2852       break;
2853     case Bytecodes::_invokedynamic: {
2854       __ call_dynamic(target, receiver, result_register,
2855                       SharedRuntime::get_resolve_static_call_stub(),
2856                       arg_list, info);
2857       break;
2858     }
2859     default:
2860       fatal("unexpected bytecode: %s", Bytecodes::name(x->code()));
2861       break;
2862   }
2863 
2864   // JSR 292
2865   // Restore the SP after MethodHandle call sites, if needed.
2866   if (is_method_handle_invoke
2867       && FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
2868     __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
2869   }
2870 
2871   if (result_register->is_valid()) {
2872     LIR_Opr result = rlock_result(x);
2873     __ move(result_register, result);
2874   }
2875 }
2876 
2877 
2878 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
2879   assert(x->number_of_arguments() == 1, "wrong type");
2880   LIRItem value       (x->argument_at(0), this);
2881   LIR_Opr reg = rlock_result(x);
2882   value.load_item();
2883   LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
2884   __ move(tmp, reg);
2885 }
2886 
2887 
2888 
2889 // Code for  :  x->x() {x->cond()} x->y() ? x->tval() : x->fval()
2890 void LIRGenerator::do_IfOp(IfOp* x) {
2891 #ifdef ASSERT
2892   {
2893     ValueTag xtag = x->x()->type()->tag();
2894     ValueTag ttag = x->tval()->type()->tag();
2895     assert(xtag == intTag || xtag == objectTag, "cannot handle others");
2896     assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
2897     assert(ttag == x->fval()->type()->tag(), "cannot handle others");
2898   }
2899 #endif
2900 
2901   LIRItem left(x->x(), this);
2902   LIRItem right(x->y(), this);
2903   left.load_item();
2904   if (can_inline_as_constant(right.value())) {
2905     right.dont_load_item();
2906   } else {
2907     right.load_item();
2908   }
2909 
2910   LIRItem t_val(x->tval(), this);
2911   LIRItem f_val(x->fval(), this);
2912   t_val.dont_load_item();
2913   f_val.dont_load_item();
2914   LIR_Opr reg = rlock_result(x);
2915 
2916   __ cmp(lir_cond(x->cond()), left.result(), right.result());
2917   __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
2918 }
2919 
2920 void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {
2921   assert(x->number_of_arguments() == 0, "wrong type");
2922   // Enforce computation of _reserved_argument_area_size which is required on some platforms.
2923   BasicTypeList signature;
2924   CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2925   LIR_Opr reg = result_register_for(x->type());
2926   __ call_runtime_leaf(routine, getThreadTemp(),
2927                        reg, new LIR_OprList());
2928   LIR_Opr result = rlock_result(x);
2929   __ move(reg, result);
2930 }
2931 
2932 
2933 
2934 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
2935   switch (x->id()) {
2936   case vmIntrinsics::_intBitsToFloat      :
2937   case vmIntrinsics::_doubleToRawLongBits :
2938   case vmIntrinsics::_longBitsToDouble    :
2939   case vmIntrinsics::_floatToRawIntBits   : {
2940     do_FPIntrinsics(x);
2941     break;
2942   }
2943 
2944 #ifdef JFR_HAVE_INTRINSICS
2945   case vmIntrinsics::_counterTime:
2946     do_RuntimeCall(CAST_FROM_FN_PTR(address, JfrTime::time_function()), x);
2947     break;
2948 #endif
2949 
2950   case vmIntrinsics::_currentTimeMillis:
2951     do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), x);
2952     break;
2953 
2954   case vmIntrinsics::_nanoTime:
2955     do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), x);
2956     break;
2957 
2958   case vmIntrinsics::_Object_init:    do_RegisterFinalizer(x); break;
2959   case vmIntrinsics::_isInstance:     do_isInstance(x);    break;
2960   case vmIntrinsics::_isPrimitive:    do_isPrimitive(x);   break;
2961   case vmIntrinsics::_getModifiers:   do_getModifiers(x);  break;
2962   case vmIntrinsics::_getClass:       do_getClass(x);      break;
2963   case vmIntrinsics::_getObjectSize:  do_getObjectSize(x); break;
2964   case vmIntrinsics::_currentCarrierThread: do_currentCarrierThread(x); break;
2965   case vmIntrinsics::_currentThread:  do_vthread(x);       break;
2966   case vmIntrinsics::_scopedValueCache: do_scopedValueCache(x); break;
2967 
2968   case vmIntrinsics::_dlog:           // fall through
2969   case vmIntrinsics::_dlog10:         // fall through
2970   case vmIntrinsics::_dabs:           // fall through
2971   case vmIntrinsics::_dsqrt:          // fall through
2972   case vmIntrinsics::_dsqrt_strict:   // fall through
2973   case vmIntrinsics::_dtan:           // fall through
2974   case vmIntrinsics::_dsin :          // fall through
2975   case vmIntrinsics::_dcos :          // fall through
2976   case vmIntrinsics::_dexp :          // fall through
2977   case vmIntrinsics::_dpow :          do_MathIntrinsic(x); break;
2978   case vmIntrinsics::_arraycopy:      do_ArrayCopy(x);     break;
2979 
2980   case vmIntrinsics::_fmaD:           do_FmaIntrinsic(x); break;
2981   case vmIntrinsics::_fmaF:           do_FmaIntrinsic(x); break;
2982 
2983   // Use java.lang.Math intrinsics code since it works for these intrinsics too.
2984   case vmIntrinsics::_floatToFloat16: // fall through
2985   case vmIntrinsics::_float16ToFloat: do_MathIntrinsic(x); break;
2986 
2987   case vmIntrinsics::_Preconditions_checkIndex:
2988     do_PreconditionsCheckIndex(x, T_INT);
2989     break;
2990   case vmIntrinsics::_Preconditions_checkLongIndex:
2991     do_PreconditionsCheckIndex(x, T_LONG);
2992     break;
2993 
2994   case vmIntrinsics::_compareAndSetReference:
2995     do_CompareAndSwap(x, objectType);
2996     break;
2997   case vmIntrinsics::_compareAndSetInt:
2998     do_CompareAndSwap(x, intType);
2999     break;
3000   case vmIntrinsics::_compareAndSetLong:
3001     do_CompareAndSwap(x, longType);
3002     break;
3003 
3004   case vmIntrinsics::_loadFence :
3005     __ membar_acquire();
3006     break;
3007   case vmIntrinsics::_storeFence:
3008     __ membar_release();
3009     break;
3010   case vmIntrinsics::_storeStoreFence:
3011     __ membar_storestore();
3012     break;
3013   case vmIntrinsics::_fullFence :
3014     __ membar();
3015     break;
3016   case vmIntrinsics::_onSpinWait:
3017     __ on_spin_wait();
3018     break;
3019   case vmIntrinsics::_Reference_get:
3020     do_Reference_get(x);
3021     break;
3022 
3023   case vmIntrinsics::_updateCRC32:
3024   case vmIntrinsics::_updateBytesCRC32:
3025   case vmIntrinsics::_updateByteBufferCRC32:
3026     do_update_CRC32(x);
3027     break;
3028 
3029   case vmIntrinsics::_updateBytesCRC32C:
3030   case vmIntrinsics::_updateDirectByteBufferCRC32C:
3031     do_update_CRC32C(x);
3032     break;
3033 
3034   case vmIntrinsics::_vectorizedMismatch:
3035     do_vectorizedMismatch(x);
3036     break;
3037 
3038   case vmIntrinsics::_blackhole:
3039     do_blackhole(x);
3040     break;
3041 
3042   default: ShouldNotReachHere(); break;
3043   }
3044 }
3045 
3046 void LIRGenerator::profile_arguments(ProfileCall* x) {
3047   if (compilation()->profile_arguments()) {
3048     int bci = x->bci_of_invoke();
3049     ciMethodData* md = x->method()->method_data_or_null();
3050     assert(md != nullptr, "Sanity");
3051     ciProfileData* data = md->bci_to_data(bci);
3052     if (data != nullptr) {
3053       if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) ||
3054           (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) {
3055         ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
3056         int base_offset = md->byte_offset_of_slot(data, extra);
3057         LIR_Opr mdp = LIR_OprFact::illegalOpr;
3058         ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();
3059 
3060         Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3061         int start = 0;
3062         int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
3063         if (x->callee()->is_loaded() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
3064           // first argument is not profiled at call (method handle invoke)
3065           assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected");
3066           start = 1;
3067         }
3068         ciSignature* callee_signature = x->callee()->signature();
3069         // method handle call to virtual method
3070         bool has_receiver = x->callee()->is_loaded() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
3071         ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : nullptr);
3072 
3073         bool ignored_will_link;
3074         ciSignature* signature_at_call = nullptr;
3075         x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3076         ciSignatureStream signature_at_call_stream(signature_at_call);
3077 
3078         // if called through method handle invoke, some arguments may have been popped
3079         for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) {
3080           int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
3081           ciKlass* exact = profile_type(md, base_offset, off,
3082               args->type(i), x->profiled_arg_at(i+start), mdp,
3083               !x->arg_needs_null_check(i+start),
3084               signature_at_call_stream.next_klass(), callee_signature_stream.next_klass());
3085           if (exact != nullptr) {
3086             md->set_argument_type(bci, i, exact);
3087           }
3088         }
3089       } else {
3090 #ifdef ASSERT
3091         Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke());
3092         int n = x->nb_profiled_args();
3093         assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() ||
3094             (x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))),
3095             "only at JSR292 bytecodes");
3096 #endif
3097       }
3098     }
3099   }
3100 }
3101 
3102 // profile parameters on entry to an inlined method
3103 void LIRGenerator::profile_parameters_at_call(ProfileCall* x) {
3104   if (compilation()->profile_parameters() && x->inlined()) {
3105     ciMethodData* md = x->callee()->method_data_or_null();
3106     if (md != nullptr) {
3107       ciParametersTypeData* parameters_type_data = md->parameters_type_data();
3108       if (parameters_type_data != nullptr) {
3109         ciTypeStackSlotEntries* parameters =  parameters_type_data->parameters();
3110         LIR_Opr mdp = LIR_OprFact::illegalOpr;
3111         bool has_receiver = !x->callee()->is_static();
3112         ciSignature* sig = x->callee()->signature();
3113         ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : nullptr);
3114         int i = 0; // to iterate on the Instructions
3115         Value arg = x->recv();
3116         bool not_null = false;
3117         int bci = x->bci_of_invoke();
3118         Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3119         // The first parameter is the receiver so that's what we start
3120         // with if it exists. One exception is method handle call to
3121         // virtual method: the receiver is in the args list
3122         if (arg == nullptr || !Bytecodes::has_receiver(bc)) {
3123           i = 1;
3124           arg = x->profiled_arg_at(0);
3125           not_null = !x->arg_needs_null_check(0);
3126         }
3127         int k = 0; // to iterate on the profile data
3128         for (;;) {
3129           intptr_t profiled_k = parameters->type(k);
3130           ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
3131                                         in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
3132                                         profiled_k, arg, mdp, not_null, sig_stream.next_klass(), nullptr);
3133           // If the profile is known statically set it once for all and do not emit any code
3134           if (exact != nullptr) {
3135             md->set_parameter_type(k, exact);
3136           }
3137           k++;
3138           if (k >= parameters_type_data->number_of_parameters()) {
3139 #ifdef ASSERT
3140             int extra = 0;
3141             if (MethodData::profile_arguments() && TypeProfileParmsLimit != -1 &&
3142                 x->nb_profiled_args() >= TypeProfileParmsLimit &&
3143                 x->recv() != nullptr && Bytecodes::has_receiver(bc)) {
3144               extra += 1;
3145             }
3146             assert(i == x->nb_profiled_args() - extra || (TypeProfileParmsLimit != -1 && TypeProfileArgsLimit > TypeProfileParmsLimit), "unused parameters?");
3147 #endif
3148             break;
3149           }
3150           arg = x->profiled_arg_at(i);
3151           not_null = !x->arg_needs_null_check(i);
3152           i++;
3153         }
3154       }
3155     }
3156   }
3157 }
3158 
3159 void LIRGenerator::do_ProfileCall(ProfileCall* x) {
3160   // Need recv in a temporary register so it interferes with the other temporaries
3161   LIR_Opr recv = LIR_OprFact::illegalOpr;
3162   LIR_Opr mdo = new_register(T_METADATA);
3163   // tmp is used to hold the counters on SPARC
3164   LIR_Opr tmp = new_pointer_register();
3165 
3166   if (x->nb_profiled_args() > 0) {
3167     profile_arguments(x);
3168   }
3169 
3170   // profile parameters on inlined method entry including receiver
3171   if (x->recv() != nullptr || x->nb_profiled_args() > 0) {
3172     profile_parameters_at_call(x);
3173   }
3174 
3175   if (x->recv() != nullptr) {
3176     LIRItem value(x->recv(), this);
3177     value.load_item();
3178     recv = new_register(T_OBJECT);
3179     __ move(value.result(), recv);
3180   }
3181   __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3182 }
3183 
3184 void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
3185   int bci = x->bci_of_invoke();
3186   ciMethodData* md = x->method()->method_data_or_null();
3187   assert(md != nullptr, "Sanity");
3188   ciProfileData* data = md->bci_to_data(bci);
3189   if (data != nullptr) {
3190     assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
3191     ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
3192     LIR_Opr mdp = LIR_OprFact::illegalOpr;
3193 
3194     bool ignored_will_link;
3195     ciSignature* signature_at_call = nullptr;
3196     x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3197 
3198     // The offset within the MDO of the entry to update may be too large
3199     // to be used in load/store instructions on some platforms. So have
3200     // profile_type() compute the address of the profile in a register.
3201     ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
3202         ret->type(), x->ret(), mdp,
3203         !x->needs_null_check(),
3204         signature_at_call->return_type()->as_klass(),
3205         x->callee()->signature()->return_type()->as_klass());
3206     if (exact != nullptr) {
3207       md->set_return_type(bci, exact);
3208     }
3209   }
3210 }
3211 
3212 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
3213   // We can safely ignore accessors here, since c2 will inline them anyway,
3214   // accessors are also always mature.
3215   if (!x->inlinee()->is_accessor()) {
3216     CodeEmitInfo* info = state_for(x, x->state(), true);
3217     // Notify the runtime very infrequently only to take care of counter overflows
3218     int freq_log = Tier23InlineeNotifyFreqLog;
3219     double scale;
3220     if (_method->has_option_value(CompileCommandEnum::CompileThresholdScaling, scale)) {
3221       freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
3222     }
3223     increment_event_counter_impl(info, x->inlinee(), LIR_OprFact::intConst(InvocationCounter::count_increment), right_n_bits(freq_log), InvocationEntryBci, false, true);
3224   }
3225 }
3226 
3227 void LIRGenerator::increment_backedge_counter_conditionally(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info, int left_bci, int right_bci, int bci) {
3228   if (compilation()->is_profiling()) {
3229 #if defined(X86) && !defined(_LP64)
3230     // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
3231     LIR_Opr left_copy = new_register(left->type());
3232     __ move(left, left_copy);
3233     __ cmp(cond, left_copy, right);
3234 #else
3235     __ cmp(cond, left, right);
3236 #endif
3237     LIR_Opr step = new_register(T_INT);
3238     LIR_Opr plus_one = LIR_OprFact::intConst(InvocationCounter::count_increment);
3239     LIR_Opr zero = LIR_OprFact::intConst(0);
3240     __ cmove(cond,
3241         (left_bci < bci) ? plus_one : zero,
3242         (right_bci < bci) ? plus_one : zero,
3243         step, left->type());
3244     increment_backedge_counter(info, step, bci);
3245   }
3246 }
3247 
3248 
3249 void LIRGenerator::increment_event_counter(CodeEmitInfo* info, LIR_Opr step, int bci, bool backedge) {
3250   int freq_log = 0;
3251   int level = compilation()->env()->comp_level();
3252   if (level == CompLevel_limited_profile) {
3253     freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
3254   } else if (level == CompLevel_full_profile) {
3255     freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
3256   } else {
3257     ShouldNotReachHere();
3258   }
3259   // Increment the appropriate invocation/backedge counter and notify the runtime.
3260   double scale;
3261   if (_method->has_option_value(CompileCommandEnum::CompileThresholdScaling, scale)) {
3262     freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
3263   }
3264   increment_event_counter_impl(info, info->scope()->method(), step, right_n_bits(freq_log), bci, backedge, true);
3265 }
3266 
3267 void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
3268                                                 ciMethod *method, LIR_Opr step, int frequency,
3269                                                 int bci, bool backedge, bool notify) {
3270   assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be x^2 - 1 or 0");
3271   int level = _compilation->env()->comp_level();
3272   assert(level > CompLevel_simple, "Shouldn't be here");
3273 
3274   int offset = -1;
3275   LIR_Opr counter_holder;
3276   if (level == CompLevel_limited_profile) {
3277     MethodCounters* counters_adr = method->ensure_method_counters();
3278     if (counters_adr == nullptr) {
3279       bailout("method counters allocation failed");
3280       return;
3281     }
3282     counter_holder = new_pointer_register();
3283     __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
3284     offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
3285                                  MethodCounters::invocation_counter_offset());
3286   } else if (level == CompLevel_full_profile) {
3287     counter_holder = new_register(T_METADATA);
3288     offset = in_bytes(backedge ? MethodData::backedge_counter_offset() :
3289                                  MethodData::invocation_counter_offset());
3290     ciMethodData* md = method->method_data_or_null();
3291     assert(md != nullptr, "Sanity");
3292     __ metadata2reg(md->constant_encoding(), counter_holder);
3293   } else {
3294     ShouldNotReachHere();
3295   }
3296   LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
3297   LIR_Opr result = new_register(T_INT);
3298   __ load(counter, result);
3299   __ add(result, step, result);
3300   __ store(result, counter);
3301   if (notify && (!backedge || UseOnStackReplacement)) {
3302     LIR_Opr meth = LIR_OprFact::metadataConst(method->constant_encoding());
3303     // The bci for info can point to cmp for if's we want the if bci
3304     CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
3305     int freq = frequency << InvocationCounter::count_shift;
3306     if (freq == 0) {
3307       if (!step->is_constant()) {
3308         __ cmp(lir_cond_notEqual, step, LIR_OprFact::intConst(0));
3309         __ branch(lir_cond_notEqual, overflow);
3310       } else {
3311         __ branch(lir_cond_always, overflow);
3312       }
3313     } else {
3314       LIR_Opr mask = load_immediate(freq, T_INT);
3315       if (!step->is_constant()) {
3316         // If step is 0, make sure the overflow check below always fails
3317         __ cmp(lir_cond_notEqual, step, LIR_OprFact::intConst(0));
3318         __ cmove(lir_cond_notEqual, result, LIR_OprFact::intConst(InvocationCounter::count_increment), result, T_INT);
3319       }
3320       __ logical_and(result, mask, result);
3321       __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
3322       __ branch(lir_cond_equal, overflow);
3323     }
3324     __ branch_destination(overflow->continuation());
3325   }
3326 }
3327 
3328 void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
3329   LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
3330   BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());
3331 
3332   if (x->pass_thread()) {
3333     signature->append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
3334     args->append(getThreadPointer());
3335   }
3336 
3337   for (int i = 0; i < x->number_of_arguments(); i++) {
3338     Value a = x->argument_at(i);
3339     LIRItem* item = new LIRItem(a, this);
3340     item->load_item();
3341     args->append(item->result());
3342     signature->append(as_BasicType(a->type()));
3343   }
3344 
3345   LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), nullptr);
3346   if (x->type() == voidType) {
3347     set_no_result(x);
3348   } else {
3349     __ move(result, rlock_result(x));
3350   }
3351 }
3352 
3353 #ifdef ASSERT
3354 void LIRGenerator::do_Assert(Assert *x) {
3355   ValueTag tag = x->x()->type()->tag();
3356   If::Condition cond = x->cond();
3357 
3358   LIRItem xitem(x->x(), this);
3359   LIRItem yitem(x->y(), this);
3360   LIRItem* xin = &xitem;
3361   LIRItem* yin = &yitem;
3362 
3363   assert(tag == intTag, "Only integer assertions are valid!");
3364 
3365   xin->load_item();
3366   yin->dont_load_item();
3367 
3368   set_no_result(x);
3369 
3370   LIR_Opr left = xin->result();
3371   LIR_Opr right = yin->result();
3372 
3373   __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
3374 }
3375 #endif
3376 
3377 void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
3378 
3379 
3380   Instruction *a = x->x();
3381   Instruction *b = x->y();
3382   if (!a || StressRangeCheckElimination) {
3383     assert(!b || StressRangeCheckElimination, "B must also be null");
3384 
3385     CodeEmitInfo *info = state_for(x, x->state());
3386     CodeStub* stub = new PredicateFailedStub(info);
3387 
3388     __ jump(stub);
3389   } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
3390     int a_int = a->type()->as_IntConstant()->value();
3391     int b_int = b->type()->as_IntConstant()->value();
3392 
3393     bool ok = false;
3394 
3395     switch(x->cond()) {
3396       case Instruction::eql: ok = (a_int == b_int); break;
3397       case Instruction::neq: ok = (a_int != b_int); break;
3398       case Instruction::lss: ok = (a_int < b_int); break;
3399       case Instruction::leq: ok = (a_int <= b_int); break;
3400       case Instruction::gtr: ok = (a_int > b_int); break;
3401       case Instruction::geq: ok = (a_int >= b_int); break;
3402       case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
3403       case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
3404       default: ShouldNotReachHere();
3405     }
3406 
3407     if (ok) {
3408 
3409       CodeEmitInfo *info = state_for(x, x->state());
3410       CodeStub* stub = new PredicateFailedStub(info);
3411 
3412       __ jump(stub);
3413     }
3414   } else {
3415 
3416     ValueTag tag = x->x()->type()->tag();
3417     If::Condition cond = x->cond();
3418     LIRItem xitem(x->x(), this);
3419     LIRItem yitem(x->y(), this);
3420     LIRItem* xin = &xitem;
3421     LIRItem* yin = &yitem;
3422 
3423     assert(tag == intTag, "Only integer deoptimizations are valid!");
3424 
3425     xin->load_item();
3426     yin->dont_load_item();
3427     set_no_result(x);
3428 
3429     LIR_Opr left = xin->result();
3430     LIR_Opr right = yin->result();
3431 
3432     CodeEmitInfo *info = state_for(x, x->state());
3433     CodeStub* stub = new PredicateFailedStub(info);
3434 
3435     __ cmp(lir_cond(cond), left, right);
3436     __ branch(lir_cond(cond), stub);
3437   }
3438 }
3439 
3440 void LIRGenerator::do_blackhole(Intrinsic *x) {
3441   assert(!x->has_receiver(), "Should have been checked before: only static methods here");
3442   for (int c = 0; c < x->number_of_arguments(); c++) {
3443     // Load the argument
3444     LIRItem vitem(x->argument_at(c), this);
3445     vitem.load_item();
3446     // ...and leave it unused.
3447   }
3448 }
3449 
3450 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
3451   LIRItemList args(1);
3452   LIRItem value(arg1, this);
3453   args.append(&value);
3454   BasicTypeList signature;
3455   signature.append(as_BasicType(arg1->type()));
3456 
3457   return call_runtime(&signature, &args, entry, result_type, info);
3458 }
3459 
3460 
3461 LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
3462   LIRItemList args(2);
3463   LIRItem value1(arg1, this);
3464   LIRItem value2(arg2, this);
3465   args.append(&value1);
3466   args.append(&value2);
3467   BasicTypeList signature;
3468   signature.append(as_BasicType(arg1->type()));
3469   signature.append(as_BasicType(arg2->type()));
3470 
3471   return call_runtime(&signature, &args, entry, result_type, info);
3472 }
3473 
3474 
3475 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
3476                                    address entry, ValueType* result_type, CodeEmitInfo* info) {
3477   // get a result register
3478   LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3479   LIR_Opr result = LIR_OprFact::illegalOpr;
3480   if (result_type->tag() != voidTag) {
3481     result = new_register(result_type);
3482     phys_reg = result_register_for(result_type);
3483   }
3484 
3485   // move the arguments into the correct location
3486   CallingConvention* cc = frame_map()->c_calling_convention(signature);
3487   assert(cc->length() == args->length(), "argument mismatch");
3488   for (int i = 0; i < args->length(); i++) {
3489     LIR_Opr arg = args->at(i);
3490     LIR_Opr loc = cc->at(i);
3491     if (loc->is_register()) {
3492       __ move(arg, loc);
3493     } else {
3494       LIR_Address* addr = loc->as_address_ptr();
3495 //           if (!can_store_as_constant(arg)) {
3496 //             LIR_Opr tmp = new_register(arg->type());
3497 //             __ move(arg, tmp);
3498 //             arg = tmp;
3499 //           }
3500       __ move(arg, addr);
3501     }
3502   }
3503 
3504   if (info) {
3505     __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3506   } else {
3507     __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3508   }
3509   if (result->is_valid()) {
3510     __ move(phys_reg, result);
3511   }
3512   return result;
3513 }
3514 
3515 
3516 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
3517                                    address entry, ValueType* result_type, CodeEmitInfo* info) {
3518   // get a result register
3519   LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3520   LIR_Opr result = LIR_OprFact::illegalOpr;
3521   if (result_type->tag() != voidTag) {
3522     result = new_register(result_type);
3523     phys_reg = result_register_for(result_type);
3524   }
3525 
3526   // move the arguments into the correct location
3527   CallingConvention* cc = frame_map()->c_calling_convention(signature);
3528 
3529   assert(cc->length() == args->length(), "argument mismatch");
3530   for (int i = 0; i < args->length(); i++) {
3531     LIRItem* arg = args->at(i);
3532     LIR_Opr loc = cc->at(i);
3533     if (loc->is_register()) {
3534       arg->load_item_force(loc);
3535     } else {
3536       LIR_Address* addr = loc->as_address_ptr();
3537       arg->load_for_store(addr->type());
3538       __ move(arg->result(), addr);
3539     }
3540   }
3541 
3542   if (info) {
3543     __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3544   } else {
3545     __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3546   }
3547   if (result->is_valid()) {
3548     __ move(phys_reg, result);
3549   }
3550   return result;
3551 }
3552 
3553 void LIRGenerator::do_MemBar(MemBar* x) {
3554   LIR_Code code = x->code();
3555   switch(code) {
3556   case lir_membar_acquire   : __ membar_acquire(); break;
3557   case lir_membar_release   : __ membar_release(); break;
3558   case lir_membar           : __ membar(); break;
3559   case lir_membar_loadload  : __ membar_loadload(); break;
3560   case lir_membar_storestore: __ membar_storestore(); break;
3561   case lir_membar_loadstore : __ membar_loadstore(); break;
3562   case lir_membar_storeload : __ membar_storeload(); break;
3563   default                   : ShouldNotReachHere(); break;
3564   }
3565 }
3566 
3567 LIR_Opr LIRGenerator::mask_boolean(LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
3568   LIR_Opr value_fixed = rlock_byte(T_BYTE);
3569   if (two_operand_lir_form) {
3570     __ move(value, value_fixed);
3571     __ logical_and(value_fixed, LIR_OprFact::intConst(1), value_fixed);
3572   } else {
3573     __ logical_and(value, LIR_OprFact::intConst(1), value_fixed);
3574   }
3575   LIR_Opr klass = new_register(T_METADATA);
3576   load_klass(array, klass, null_check_info);
3577   null_check_info = nullptr;
3578   LIR_Opr layout = new_register(T_INT);
3579   __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
3580   int diffbit = Klass::layout_helper_boolean_diffbit();
3581   __ logical_and(layout, LIR_OprFact::intConst(diffbit), layout);
3582   __ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0));
3583   __ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE);
3584   value = value_fixed;
3585   return value;
3586 }