1 /*
   2  * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "c1/c1_Compilation.hpp"
  26 #include "c1/c1_Defs.hpp"
  27 #include "c1/c1_FrameMap.hpp"
  28 #include "c1/c1_Instruction.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_LIRGenerator.hpp"
  31 #include "c1/c1_ValueStack.hpp"
  32 #include "ci/ciArrayKlass.hpp"
  33 #include "ci/ciFlatArrayKlass.hpp"
  34 #include "ci/ciInlineKlass.hpp"
  35 #include "ci/ciInstance.hpp"
  36 #include "ci/ciObjArray.hpp"
  37 #include "ci/ciObjArrayKlass.hpp"
  38 #include "ci/ciUtilities.hpp"
  39 #include "compiler/compilerDefinitions.inline.hpp"
  40 #include "compiler/compilerOracle.hpp"
  41 #include "gc/shared/barrierSet.hpp"
  42 #include "gc/shared/c1/barrierSetC1.hpp"
  43 #include "oops/klass.inline.hpp"
  44 #include "oops/methodCounters.hpp"
  45 #include "runtime/arguments.hpp"
  46 #include "runtime/sharedRuntime.hpp"
  47 #include "runtime/stubRoutines.hpp"
  48 #include "runtime/vm_version.hpp"
  49 #include "utilities/bitMap.inline.hpp"
  50 #include "utilities/macros.hpp"
  51 #include "utilities/powerOfTwo.hpp"
  52 
  53 #ifdef ASSERT
  54 #define __ gen()->lir(__FILE__, __LINE__)->
  55 #else
  56 #define __ gen()->lir()->
  57 #endif
  58 
  59 #ifndef PATCHED_ADDR
  60 #define PATCHED_ADDR  (max_jint)
  61 #endif
  62 
  63 void PhiResolverState::reset() {
  64   _virtual_operands.clear();
  65   _other_operands.clear();
  66   _vreg_table.clear();
  67 }
  68 
  69 
  70 //--------------------------------------------------------------
  71 // PhiResolver
  72 
  73 // Resolves cycles:
  74 //
  75 //  r1 := r2  becomes  temp := r1
  76 //  r2 := r1           r1 := r2
  77 //                     r2 := temp
  78 // and orders moves:
  79 //
  80 //  r2 := r3  becomes  r1 := r2
  81 //  r1 := r2           r2 := r3
  82 
  83 PhiResolver::PhiResolver(LIRGenerator* gen)
  84  : _gen(gen)
  85  , _state(gen->resolver_state())
  86  , _loop(nullptr)
  87  , _temp(LIR_OprFact::illegalOpr)
  88 {
  89   // reinitialize the shared state arrays
  90   _state.reset();
  91 }
  92 
  93 
  94 void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  95   assert(src->is_valid(), "");
  96   assert(dest->is_valid(), "");
  97   __ move(src, dest);
  98 }
  99 
 100 
 101 void PhiResolver::move_temp_to(LIR_Opr dest) {
 102   assert(_temp->is_valid(), "");
 103   emit_move(_temp, dest);
 104   NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
 105 }
 106 
 107 
 108 void PhiResolver::move_to_temp(LIR_Opr src) {
 109   assert(_temp->is_illegal(), "");
 110   _temp = _gen->new_register(src->type());
 111   emit_move(src, _temp);
 112 }
 113 
 114 
 115 // Traverse assignment graph in depth first order and generate moves in post order
 116 // ie. two assignments: b := c, a := b start with node c:
 117 // Call graph: move(null, c) -> move(c, b) -> move(b, a)
 118 // Generates moves in this order: move b to a and move c to b
 119 // ie. cycle a := b, b := a start with node a
 120 // Call graph: move(null, a) -> move(a, b) -> move(b, a)
 121 // Generates moves in this order: move b to temp, move a to b, move temp to a
 122 void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
 123   if (!dest->visited()) {
 124     dest->set_visited();
 125     for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
 126       move(dest, dest->destination_at(i));
 127     }
 128   } else if (!dest->start_node()) {
 129     // cylce in graph detected
 130     assert(_loop == nullptr, "only one loop valid!");
 131     _loop = dest;
 132     move_to_temp(src->operand());
 133     return;
 134   } // else dest is a start node
 135 
 136   if (!dest->assigned()) {
 137     if (_loop == dest) {
 138       move_temp_to(dest->operand());
 139       dest->set_assigned();
 140     } else if (src != nullptr) {
 141       emit_move(src->operand(), dest->operand());
 142       dest->set_assigned();
 143     }
 144   }
 145 }
 146 
 147 
 148 PhiResolver::~PhiResolver() {
 149   int i;
 150   // resolve any cycles in moves from and to virtual registers
 151   for (i = virtual_operands().length() - 1; i >= 0; i --) {
 152     ResolveNode* node = virtual_operands().at(i);
 153     if (!node->visited()) {
 154       _loop = nullptr;
 155       move(nullptr, node);
 156       node->set_start_node();
 157       assert(_temp->is_illegal(), "move_temp_to() call missing");
 158     }
 159   }
 160 
 161   // generate move for move from non virtual register to abitrary destination
 162   for (i = other_operands().length() - 1; i >= 0; i --) {
 163     ResolveNode* node = other_operands().at(i);
 164     for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
 165       emit_move(node->operand(), node->destination_at(j)->operand());
 166     }
 167   }
 168 }
 169 
 170 
 171 ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
 172   ResolveNode* node;
 173   if (opr->is_virtual()) {
 174     int vreg_num = opr->vreg_number();
 175     node = vreg_table().at_grow(vreg_num, nullptr);
 176     assert(node == nullptr || node->operand() == opr, "");
 177     if (node == nullptr) {
 178       node = new ResolveNode(opr);
 179       vreg_table().at_put(vreg_num, node);
 180     }
 181     // Make sure that all virtual operands show up in the list when
 182     // they are used as the source of a move.
 183     if (source && !virtual_operands().contains(node)) {
 184       virtual_operands().append(node);
 185     }
 186   } else {
 187     assert(source, "");
 188     node = new ResolveNode(opr);
 189     other_operands().append(node);
 190   }
 191   return node;
 192 }
 193 
 194 
 195 void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
 196   assert(dest->is_virtual(), "");
 197   // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
 198   assert(src->is_valid(), "");
 199   assert(dest->is_valid(), "");
 200   ResolveNode* source = source_node(src);
 201   source->append(destination_node(dest));
 202 }
 203 
 204 
 205 //--------------------------------------------------------------
 206 // LIRItem
 207 
 208 void LIRItem::set_result(LIR_Opr opr) {
 209   assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
 210   value()->set_operand(opr);
 211 
 212 #ifdef ASSERT
 213   if (opr->is_virtual()) {
 214     _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), nullptr);
 215   }
 216 #endif
 217 
 218   _result = opr;
 219 }
 220 
 221 void LIRItem::load_item() {
 222   assert(!_gen->in_conditional_code(), "LIRItem cannot be loaded in conditional code");
 223 
 224   if (result()->is_illegal()) {
 225     // update the items result
 226     _result = value()->operand();
 227   }
 228   if (!result()->is_register()) {
 229     LIR_Opr reg = _gen->new_register(value()->type());
 230     __ move(result(), reg);
 231     if (result()->is_constant()) {
 232       _result = reg;
 233     } else {
 234       set_result(reg);
 235     }
 236   }
 237 }
 238 
 239 
 240 void LIRItem::load_for_store(BasicType type) {
 241   if (_gen->can_store_as_constant(value(), type)) {
 242     _result = value()->operand();
 243     if (!_result->is_constant()) {
 244       _result = LIR_OprFact::value_type(value()->type());
 245     }
 246   } else if (type == T_BYTE || type == T_BOOLEAN) {
 247     load_byte_item();
 248   } else {
 249     load_item();
 250   }
 251 }
 252 
 253 void LIRItem::load_item_force(LIR_Opr reg) {
 254   LIR_Opr r = result();
 255   if (r != reg) {
 256 #if !defined(ARM) && !defined(E500V2)
 257     if (r->type() != reg->type()) {
 258       // moves between different types need an intervening spill slot
 259       r = _gen->force_to_spill(r, reg->type());
 260     }
 261 #endif
 262     __ move(r, reg);
 263     _result = reg;
 264   }
 265 }
 266 
 267 ciObject* LIRItem::get_jobject_constant() const {
 268   ObjectType* oc = type()->as_ObjectType();
 269   if (oc) {
 270     return oc->constant_value();
 271   }
 272   return nullptr;
 273 }
 274 
 275 
 276 jint LIRItem::get_jint_constant() const {
 277   assert(is_constant() && value() != nullptr, "");
 278   assert(type()->as_IntConstant() != nullptr, "type check");
 279   return type()->as_IntConstant()->value();
 280 }
 281 
 282 
 283 jint LIRItem::get_address_constant() const {
 284   assert(is_constant() && value() != nullptr, "");
 285   assert(type()->as_AddressConstant() != nullptr, "type check");
 286   return type()->as_AddressConstant()->value();
 287 }
 288 
 289 
 290 jfloat LIRItem::get_jfloat_constant() const {
 291   assert(is_constant() && value() != nullptr, "");
 292   assert(type()->as_FloatConstant() != nullptr, "type check");
 293   return type()->as_FloatConstant()->value();
 294 }
 295 
 296 
 297 jdouble LIRItem::get_jdouble_constant() const {
 298   assert(is_constant() && value() != nullptr, "");
 299   assert(type()->as_DoubleConstant() != nullptr, "type check");
 300   return type()->as_DoubleConstant()->value();
 301 }
 302 
 303 
 304 jlong LIRItem::get_jlong_constant() const {
 305   assert(is_constant() && value() != nullptr, "");
 306   assert(type()->as_LongConstant() != nullptr, "type check");
 307   return type()->as_LongConstant()->value();
 308 }
 309 
 310 
 311 
 312 //--------------------------------------------------------------
 313 
 314 
 315 void LIRGenerator::block_do_prolog(BlockBegin* block) {
 316 #ifndef PRODUCT
 317   if (PrintIRWithLIR) {
 318     block->print();
 319   }
 320 #endif
 321 
 322   // set up the list of LIR instructions
 323   assert(block->lir() == nullptr, "LIR list already computed for this block");
 324   _lir = new LIR_List(compilation(), block);
 325   block->set_lir(_lir);
 326 
 327   __ branch_destination(block->label());
 328 
 329   if (LIRTraceExecution &&
 330       Compilation::current()->hir()->start()->block_id() != block->block_id() &&
 331       !block->is_set(BlockBegin::exception_entry_flag)) {
 332     assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
 333     trace_block_entry(block);
 334   }
 335 }
 336 
 337 
 338 void LIRGenerator::block_do_epilog(BlockBegin* block) {
 339 #ifndef PRODUCT
 340   if (PrintIRWithLIR) {
 341     tty->cr();
 342   }
 343 #endif
 344 
 345   // LIR_Opr for unpinned constants shouldn't be referenced by other
 346   // blocks so clear them out after processing the block.
 347   for (int i = 0; i < _unpinned_constants.length(); i++) {
 348     _unpinned_constants.at(i)->clear_operand();
 349   }
 350   _unpinned_constants.trunc_to(0);
 351 
 352   // clear our any registers for other local constants
 353   _constants.trunc_to(0);
 354   _reg_for_constants.trunc_to(0);
 355 }
 356 
 357 
 358 void LIRGenerator::block_do(BlockBegin* block) {
 359   CHECK_BAILOUT();
 360 
 361   block_do_prolog(block);
 362   set_block(block);
 363 
 364   for (Instruction* instr = block; instr != nullptr; instr = instr->next()) {
 365     if (instr->is_pinned()) do_root(instr);
 366   }
 367 
 368   set_block(nullptr);
 369   block_do_epilog(block);
 370 }
 371 
 372 
 373 //-------------------------LIRGenerator-----------------------------
 374 
 375 // This is where the tree-walk starts; instr must be root;
 376 void LIRGenerator::do_root(Value instr) {
 377   CHECK_BAILOUT();
 378 
 379   InstructionMark im(compilation(), instr);
 380 
 381   assert(instr->is_pinned(), "use only with roots");
 382   assert(instr->subst() == instr, "shouldn't have missed substitution");
 383 
 384   instr->visit(this);
 385 
 386   assert(!instr->has_uses() || instr->operand()->is_valid() ||
 387          instr->as_Constant() != nullptr || bailed_out(), "invalid item set");
 388 }
 389 
 390 
 391 // This is called for each node in tree; the walk stops if a root is reached
 392 void LIRGenerator::walk(Value instr) {
 393   InstructionMark im(compilation(), instr);
 394   //stop walk when encounter a root
 395   if ((instr->is_pinned() && instr->as_Phi() == nullptr) || instr->operand()->is_valid()) {
 396     assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != nullptr, "this root has not yet been visited");
 397   } else {
 398     assert(instr->subst() == instr, "shouldn't have missed substitution");
 399     instr->visit(this);
 400     // assert(instr->use_count() > 0 || instr->as_Phi() != nullptr, "leaf instruction must have a use");
 401   }
 402 }
 403 
 404 
 405 CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
 406   assert(state != nullptr, "state must be defined");
 407 
 408 #ifndef PRODUCT
 409   state->verify();
 410 #endif
 411 
 412   ValueStack* s = state;
 413   for_each_state(s) {
 414     if (s->kind() == ValueStack::EmptyExceptionState ||
 415         s->kind() == ValueStack::CallerEmptyExceptionState)
 416     {
 417 #ifdef ASSERT
 418       int index;
 419       Value value;
 420       for_each_stack_value(s, index, value) {
 421         fatal("state must be empty");
 422       }
 423       for_each_local_value(s, index, value) {
 424         fatal("state must be empty");
 425       }
 426 #endif
 427       assert(s->locks_size() == 0 || s->locks_size() == 1, "state must be empty");
 428       continue;
 429     }
 430 
 431     int index;
 432     Value value;
 433     for_each_stack_value(s, index, value) {
 434       assert(value->subst() == value, "missed substitution");
 435       if (!value->is_pinned() && value->as_Constant() == nullptr && value->as_Local() == nullptr) {
 436         walk(value);
 437         assert(value->operand()->is_valid(), "must be evaluated now");
 438       }
 439     }
 440 
 441     int bci = s->bci();
 442     IRScope* scope = s->scope();
 443     ciMethod* method = scope->method();
 444 
 445     MethodLivenessResult liveness = method->liveness_at_bci(bci);
 446     if (bci == SynchronizationEntryBCI) {
 447       if (x->as_ExceptionObject() || x->as_Throw()) {
 448         // all locals are dead on exit from the synthetic unlocker
 449         liveness.clear();
 450       } else {
 451         assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
 452       }
 453     }
 454     if (!liveness.is_valid()) {
 455       // Degenerate or breakpointed method.
 456       bailout("Degenerate or breakpointed method");
 457     } else {
 458       assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
 459       for_each_local_value(s, index, value) {
 460         assert(value->subst() == value, "missed substitution");
 461         if (liveness.at(index) && !value->type()->is_illegal()) {
 462           if (!value->is_pinned() && value->as_Constant() == nullptr && value->as_Local() == nullptr) {
 463             walk(value);
 464             assert(value->operand()->is_valid(), "must be evaluated now");
 465           }
 466         } else {
 467           // null out this local so that linear scan can assume that all non-null values are live.
 468           s->invalidate_local(index);
 469         }
 470       }
 471     }
 472   }
 473 
 474   return new CodeEmitInfo(state, ignore_xhandler ? nullptr : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
 475 }
 476 
 477 
 478 CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
 479   return state_for(x, x->exception_state());
 480 }
 481 
 482 
 483 void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) {
 484   /* C2 relies on constant pool entries being resolved (ciTypeFlow), so if tiered compilation
 485    * is active and the class hasn't yet been resolved we need to emit a patch that resolves
 486    * the class. */
 487   if ((!CompilerConfig::is_c1_only_no_jvmci() && need_resolve) || !obj->is_loaded() || PatchALot) {
 488     assert(info != nullptr, "info must be set if class is not loaded");
 489     __ klass2reg_patch(nullptr, r, info);
 490   } else {
 491     // no patching needed
 492     __ metadata2reg(obj->constant_encoding(), r);
 493   }
 494 }
 495 
 496 
 497 void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
 498                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
 499   CodeStub* stub = new RangeCheckStub(range_check_info, index, array);
 500   if (index->is_constant()) {
 501     cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
 502                 index->as_jint(), null_check_info);
 503     __ branch(lir_cond_belowEqual, stub); // forward branch
 504   } else {
 505     cmp_reg_mem(lir_cond_aboveEqual, index, array,
 506                 arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
 507     __ branch(lir_cond_aboveEqual, stub); // forward branch
 508   }
 509 }
 510 
 511 void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp_op, CodeEmitInfo* info) {
 512   LIR_Opr result_op = result;
 513   LIR_Opr left_op   = left;
 514   LIR_Opr right_op  = right;
 515 
 516   if (two_operand_lir_form && left_op != result_op) {
 517     assert(right_op != result_op, "malformed");
 518     __ move(left_op, result_op);
 519     left_op = result_op;
 520   }
 521 
 522   switch(code) {
 523     case Bytecodes::_dadd:
 524     case Bytecodes::_fadd:
 525     case Bytecodes::_ladd:
 526     case Bytecodes::_iadd:  __ add(left_op, right_op, result_op); break;
 527     case Bytecodes::_fmul:
 528     case Bytecodes::_lmul:  __ mul(left_op, right_op, result_op); break;
 529 
 530     case Bytecodes::_dmul:  __ mul(left_op, right_op, result_op, tmp_op); break;
 531 
 532     case Bytecodes::_imul:
 533       {
 534         bool did_strength_reduce = false;
 535 
 536         if (right->is_constant()) {
 537           jint c = right->as_jint();
 538           if (c > 0 && is_power_of_2(c)) {
 539             // do not need tmp here
 540             __ shift_left(left_op, exact_log2(c), result_op);
 541             did_strength_reduce = true;
 542           } else {
 543             did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
 544           }
 545         }
 546         // we couldn't strength reduce so just emit the multiply
 547         if (!did_strength_reduce) {
 548           __ mul(left_op, right_op, result_op);
 549         }
 550       }
 551       break;
 552 
 553     case Bytecodes::_dsub:
 554     case Bytecodes::_fsub:
 555     case Bytecodes::_lsub:
 556     case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;
 557 
 558     case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
 559     // ldiv and lrem are implemented with a direct runtime call
 560 
 561     case Bytecodes::_ddiv: __ div(left_op, right_op, result_op, tmp_op); break;
 562 
 563     case Bytecodes::_drem:
 564     case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;
 565 
 566     default: ShouldNotReachHere();
 567   }
 568 }
 569 
 570 
 571 void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
 572   arithmetic_op(code, result, left, right, tmp);
 573 }
 574 
 575 
 576 void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
 577   arithmetic_op(code, result, left, right, LIR_OprFact::illegalOpr, info);
 578 }
 579 
 580 
 581 void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
 582   arithmetic_op(code, result, left, right, tmp);
 583 }
 584 
 585 
 586 void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
 587 
 588   if (two_operand_lir_form && value != result_op
 589       // Only 32bit right shifts require two operand form on S390.
 590       S390_ONLY(&& (code == Bytecodes::_ishr || code == Bytecodes::_iushr))) {
 591     assert(count != result_op, "malformed");
 592     __ move(value, result_op);
 593     value = result_op;
 594   }
 595 
 596   assert(count->is_constant() || count->is_register(), "must be");
 597   switch(code) {
 598   case Bytecodes::_ishl:
 599   case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
 600   case Bytecodes::_ishr:
 601   case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
 602   case Bytecodes::_iushr:
 603   case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
 604   default: ShouldNotReachHere();
 605   }
 606 }
 607 
 608 
 609 void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
 610   if (two_operand_lir_form && left_op != result_op) {
 611     assert(right_op != result_op, "malformed");
 612     __ move(left_op, result_op);
 613     left_op = result_op;
 614   }
 615 
 616   switch(code) {
 617     case Bytecodes::_iand:
 618     case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;
 619 
 620     case Bytecodes::_ior:
 621     case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;
 622 
 623     case Bytecodes::_ixor:
 624     case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;
 625 
 626     default: ShouldNotReachHere();
 627   }
 628 }
 629 
 630 
 631 void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no,
 632                                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info, CodeStub* throw_ie_stub) {
 633   // for slow path, use debug info for state after successful locking
 634   CodeStub* slow_path = new MonitorEnterStub(object, lock, info, throw_ie_stub, scratch);
 635   __ load_stack_address_monitor(monitor_no, lock);
 636   // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
 637   __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception, throw_ie_stub);
 638 }
 639 
 640 
 641 void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
 642   // setup registers
 643   LIR_Opr hdr = lock;
 644   lock = new_hdr;
 645   CodeStub* slow_path = new MonitorExitStub(lock, monitor_no);
 646   __ load_stack_address_monitor(monitor_no, lock);
 647   __ unlock_object(hdr, object, lock, scratch, slow_path);
 648 }
 649 
 650 #ifndef PRODUCT
 651 void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
 652   if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
 653     tty->print_cr("   ###class not loaded at new bci %d", new_instance->printable_bci());
 654   } else if (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_jvmci() && new_instance->is_unresolved())) {
 655     tty->print_cr("   ###class not resolved at new bci %d", new_instance->printable_bci());
 656   }
 657 }
 658 #endif
 659 
 660 void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, bool allow_inline, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
 661   if (allow_inline) {
 662     assert(!is_unresolved && klass->is_loaded(), "inline type klass should be resolved");
 663     __ metadata2reg(klass->constant_encoding(), klass_reg);
 664   } else {
 665     klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
 666   }
 667   // If klass is not loaded we do not know if the klass has finalizers or is an unexpected inline klass
 668   if (UseFastNewInstance && klass->is_loaded() && (allow_inline || !klass->is_inlinetype())
 669       && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
 670 
 671     StubId stub_id = klass->is_initialized() ? StubId::c1_fast_new_instance_id : StubId::c1_fast_new_instance_init_check_id;
 672 
 673     CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);
 674 
 675     assert(klass->is_loaded(), "must be loaded");
 676     // allocate space for instance
 677     assert(klass->size_helper() > 0, "illegal instance size");
 678     const int instance_size = align_object_size(klass->size_helper());
 679     __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
 680                        oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
 681   } else {
 682     CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, StubId::c1_new_instance_id);
 683     __ jump(slow_path);
 684     __ branch_destination(slow_path->continuation());
 685   }
 686 }
 687 
 688 
 689 static bool is_constant_zero(Instruction* inst) {
 690   IntConstant* c = inst->type()->as_IntConstant();
 691   if (c) {
 692     return (c->value() == 0);
 693   }
 694   return false;
 695 }
 696 
 697 
 698 static bool positive_constant(Instruction* inst) {
 699   IntConstant* c = inst->type()->as_IntConstant();
 700   if (c) {
 701     return (c->value() >= 0);
 702   }
 703   return false;
 704 }
 705 
 706 
 707 static ciArrayKlass* as_array_klass(ciType* type) {
 708   if (type != nullptr && type->is_array_klass() && type->is_loaded()) {
 709     return (ciArrayKlass*)type;
 710   } else {
 711     return nullptr;
 712   }
 713 }
 714 
 715 static ciType* phi_declared_type(Phi* phi) {
 716   ciType* t = phi->operand_at(0)->declared_type();
 717   if (t == nullptr) {
 718     return nullptr;
 719   }
 720   for(int i = 1; i < phi->operand_count(); i++) {
 721     if (t != phi->operand_at(i)->declared_type()) {
 722       return nullptr;
 723     }
 724   }
 725   return t;
 726 }
 727 
 728 void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
 729   Instruction* src     = x->argument_at(0);
 730   Instruction* src_pos = x->argument_at(1);
 731   Instruction* dst     = x->argument_at(2);
 732   Instruction* dst_pos = x->argument_at(3);
 733   Instruction* length  = x->argument_at(4);
 734 
 735   // first try to identify the likely type of the arrays involved
 736   ciArrayKlass* expected_type = nullptr;
 737   bool is_exact = false, src_objarray = false, dst_objarray = false;
 738   {
 739     ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
 740     ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
 741     Phi* phi;
 742     if (src_declared_type == nullptr && (phi = src->as_Phi()) != nullptr) {
 743       src_declared_type = as_array_klass(phi_declared_type(phi));
 744     }
 745     ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
 746     ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
 747     if (dst_declared_type == nullptr && (phi = dst->as_Phi()) != nullptr) {
 748       dst_declared_type = as_array_klass(phi_declared_type(phi));
 749     }
 750 
 751     if (src_exact_type != nullptr && src_exact_type == dst_exact_type) {
 752       // the types exactly match so the type is fully known
 753       is_exact = true;
 754       expected_type = src_exact_type;
 755     } else if (dst_exact_type != nullptr && dst_exact_type->is_obj_array_klass()) {
 756       ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
 757       ciArrayKlass* src_type = nullptr;
 758       if (src_exact_type != nullptr && src_exact_type->is_obj_array_klass()) {
 759         src_type = (ciArrayKlass*) src_exact_type;
 760       } else if (src_declared_type != nullptr && src_declared_type->is_obj_array_klass()) {
 761         src_type = (ciArrayKlass*) src_declared_type;
 762       }
 763       if (src_type != nullptr) {
 764         if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
 765           is_exact = true;
 766           expected_type = dst_type;
 767         }
 768       }
 769     }
 770     // at least pass along a good guess
 771     if (expected_type == nullptr) expected_type = dst_exact_type;
 772     if (expected_type == nullptr) expected_type = src_declared_type;
 773     if (expected_type == nullptr) expected_type = dst_declared_type;
 774 
 775     if (expected_type != nullptr && expected_type->is_obj_array_klass()) {
 776       // For a direct pointer comparison, we need the refined array klass pointer
 777       expected_type = ciObjArrayKlass::make(expected_type->as_array_klass()->element_klass());
 778     }
 779 
 780     src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
 781     dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
 782   }
 783 
 784   // if a probable array type has been identified, figure out if any
 785   // of the required checks for a fast case can be elided.
 786   int flags = LIR_OpArrayCopy::all_flags;
 787 
 788   if (!src->is_loaded_flat_array() && !dst->is_loaded_flat_array()) {
 789     flags &= ~LIR_OpArrayCopy::always_slow_path;
 790   }
 791   if (!src->maybe_flat_array()) {
 792     flags &= ~LIR_OpArrayCopy::src_inlinetype_check;
 793   }
 794   if (!dst->maybe_flat_array() && !dst->maybe_null_free_array()) {
 795     flags &= ~LIR_OpArrayCopy::dst_inlinetype_check;
 796   }
 797 
 798   if (!src_objarray)
 799     flags &= ~LIR_OpArrayCopy::src_objarray;
 800   if (!dst_objarray)
 801     flags &= ~LIR_OpArrayCopy::dst_objarray;
 802 
 803   if (!x->arg_needs_null_check(0))
 804     flags &= ~LIR_OpArrayCopy::src_null_check;
 805   if (!x->arg_needs_null_check(2))
 806     flags &= ~LIR_OpArrayCopy::dst_null_check;
 807 
 808 
 809   if (expected_type != nullptr) {
 810     Value length_limit = nullptr;
 811 
 812     IfOp* ifop = length->as_IfOp();
 813     if (ifop != nullptr) {
 814       // look for expressions like min(v, a.length) which ends up as
 815       //   x > y ? y : x  or  x >= y ? y : x
 816       if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
 817           ifop->x() == ifop->fval() &&
 818           ifop->y() == ifop->tval()) {
 819         length_limit = ifop->y();
 820       }
 821     }
 822 
 823     // try to skip null checks and range checks
 824     NewArray* src_array = src->as_NewArray();
 825     if (src_array != nullptr) {
 826       flags &= ~LIR_OpArrayCopy::src_null_check;
 827       if (length_limit != nullptr &&
 828           src_array->length() == length_limit &&
 829           is_constant_zero(src_pos)) {
 830         flags &= ~LIR_OpArrayCopy::src_range_check;
 831       }
 832     }
 833 
 834     NewArray* dst_array = dst->as_NewArray();
 835     if (dst_array != nullptr) {
 836       flags &= ~LIR_OpArrayCopy::dst_null_check;
 837       if (length_limit != nullptr &&
 838           dst_array->length() == length_limit &&
 839           is_constant_zero(dst_pos)) {
 840         flags &= ~LIR_OpArrayCopy::dst_range_check;
 841       }
 842     }
 843 
 844     // check from incoming constant values
 845     if (positive_constant(src_pos))
 846       flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
 847     if (positive_constant(dst_pos))
 848       flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
 849     if (positive_constant(length))
 850       flags &= ~LIR_OpArrayCopy::length_positive_check;
 851 
 852     // see if the range check can be elided, which might also imply
 853     // that src or dst is non-null.
 854     ArrayLength* al = length->as_ArrayLength();
 855     if (al != nullptr) {
 856       if (al->array() == src) {
 857         // it's the length of the source array
 858         flags &= ~LIR_OpArrayCopy::length_positive_check;
 859         flags &= ~LIR_OpArrayCopy::src_null_check;
 860         if (is_constant_zero(src_pos))
 861           flags &= ~LIR_OpArrayCopy::src_range_check;
 862       }
 863       if (al->array() == dst) {
 864         // it's the length of the destination array
 865         flags &= ~LIR_OpArrayCopy::length_positive_check;
 866         flags &= ~LIR_OpArrayCopy::dst_null_check;
 867         if (is_constant_zero(dst_pos))
 868           flags &= ~LIR_OpArrayCopy::dst_range_check;
 869       }
 870     }
 871     if (is_exact) {
 872       flags &= ~LIR_OpArrayCopy::type_check;
 873     }
 874   }
 875 
 876   IntConstant* src_int = src_pos->type()->as_IntConstant();
 877   IntConstant* dst_int = dst_pos->type()->as_IntConstant();
 878   if (src_int && dst_int) {
 879     int s_offs = src_int->value();
 880     int d_offs = dst_int->value();
 881     if (src_int->value() >= dst_int->value()) {
 882       flags &= ~LIR_OpArrayCopy::overlapping;
 883     }
 884     if (expected_type != nullptr) {
 885       BasicType t = expected_type->element_type()->basic_type();
 886       int element_size = type2aelembytes(t);
 887       if (((arrayOopDesc::base_offset_in_bytes(t) + (uint)s_offs * element_size) % HeapWordSize == 0) &&
 888           ((arrayOopDesc::base_offset_in_bytes(t) + (uint)d_offs * element_size) % HeapWordSize == 0)) {
 889         flags &= ~LIR_OpArrayCopy::unaligned;
 890       }
 891     }
 892   } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
 893     // src and dest positions are the same, or dst is zero so assume
 894     // nonoverlapping copy.
 895     flags &= ~LIR_OpArrayCopy::overlapping;
 896   }
 897 
 898   if (src == dst) {
 899     // moving within a single array so no type checks are needed
 900     if (flags & LIR_OpArrayCopy::type_check) {
 901       flags &= ~LIR_OpArrayCopy::type_check;
 902     }
 903   }
 904   *flagsp = flags;
 905   *expected_typep = (ciArrayKlass*)expected_type;
 906 }
 907 
 908 
 909 LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
 910   assert(type2size[t] == type2size[value->type()],
 911          "size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type()));
 912   if (!value->is_register()) {
 913     // force into a register
 914     LIR_Opr r = new_register(value->type());
 915     __ move(value, r);
 916     value = r;
 917   }
 918 
 919   // create a spill location
 920   LIR_Opr tmp = new_register(t);
 921   set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);
 922 
 923   // move from register to spill
 924   __ move(value, tmp);
 925   return tmp;
 926 }
 927 
 928 void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
 929   if (if_instr->should_profile()) {
 930     ciMethod* method = if_instr->profiled_method();
 931     assert(method != nullptr, "method should be set if branch is profiled");
 932     ciMethodData* md = method->method_data_or_null();
 933     assert(md != nullptr, "Sanity");
 934     ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
 935     assert(data != nullptr, "must have profiling data");
 936     assert(data->is_BranchData(), "need BranchData for two-way branches");
 937     int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
 938     int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
 939     if (if_instr->is_swapped()) {
 940       int t = taken_count_offset;
 941       taken_count_offset = not_taken_count_offset;
 942       not_taken_count_offset = t;
 943     }
 944 
 945     LIR_Opr md_reg = new_register(T_METADATA);
 946     __ metadata2reg(md->constant_encoding(), md_reg);
 947 
 948     LIR_Opr data_offset_reg = new_pointer_register();
 949     __ cmove(lir_cond(cond),
 950              LIR_OprFact::intptrConst(taken_count_offset),
 951              LIR_OprFact::intptrConst(not_taken_count_offset),
 952              data_offset_reg, as_BasicType(if_instr->x()->type()));
 953 
 954     // MDO cells are intptr_t, so the data_reg width is arch-dependent.
 955     LIR_Opr data_reg = new_pointer_register();
 956     LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
 957     __ move(data_addr, data_reg);
 958     // Use leal instead of add to avoid destroying condition codes on x86
 959     LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
 960     __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
 961     __ move(data_reg, data_addr);
 962   }
 963 }
 964 
 965 // Phi technique:
 966 // This is about passing live values from one basic block to the other.
 967 // In code generated with Java it is rather rare that more than one
 968 // value is on the stack from one basic block to the other.
 969 // We optimize our technique for efficient passing of one value
 970 // (of type long, int, double..) but it can be extended.
 971 // When entering or leaving a basic block, all registers and all spill
 972 // slots are release and empty. We use the released registers
 973 // and spill slots to pass the live values from one block
 974 // to the other. The topmost value, i.e., the value on TOS of expression
 975 // stack is passed in registers. All other values are stored in spilling
 976 // area. Every Phi has an index which designates its spill slot
 977 // At exit of a basic block, we fill the register(s) and spill slots.
 978 // At entry of a basic block, the block_prolog sets up the content of phi nodes
 979 // and locks necessary registers and spilling slots.
 980 
 981 
 982 // move current value to referenced phi function
 983 void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
 984   Phi* phi = sux_val->as_Phi();
 985   // cur_val can be null without phi being null in conjunction with inlining
 986   if (phi != nullptr && cur_val != nullptr && cur_val != phi && !phi->is_illegal()) {
 987     if (phi->is_local()) {
 988       for (int i = 0; i < phi->operand_count(); i++) {
 989         Value op = phi->operand_at(i);
 990         if (op != nullptr && op->type()->is_illegal()) {
 991           bailout("illegal phi operand");
 992         }
 993       }
 994     }
 995     Phi* cur_phi = cur_val->as_Phi();
 996     if (cur_phi != nullptr && cur_phi->is_illegal()) {
 997       // Phi and local would need to get invalidated
 998       // (which is unexpected for Linear Scan).
 999       // But this case is very rare so we simply bail out.
1000       bailout("propagation of illegal phi");
1001       return;
1002     }
1003     LIR_Opr operand = cur_val->operand();
1004     if (operand->is_illegal()) {
1005       assert(cur_val->as_Constant() != nullptr || cur_val->as_Local() != nullptr,
1006              "these can be produced lazily");
1007       operand = operand_for_instruction(cur_val);
1008     }
1009     resolver->move(operand, operand_for_instruction(phi));
1010   }
1011 }
1012 
1013 
1014 // Moves all stack values into their PHI position
1015 void LIRGenerator::move_to_phi(ValueStack* cur_state) {
1016   BlockBegin* bb = block();
1017   if (bb->number_of_sux() == 1) {
1018     BlockBegin* sux = bb->sux_at(0);
1019     assert(sux->number_of_preds() > 0, "invalid CFG");
1020 
1021     // a block with only one predecessor never has phi functions
1022     if (sux->number_of_preds() > 1) {
1023       PhiResolver resolver(this);
1024 
1025       ValueStack* sux_state = sux->state();
1026       Value sux_value;
1027       int index;
1028 
1029       assert(cur_state->scope() == sux_state->scope(), "not matching");
1030       assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
1031       assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");
1032 
1033       for_each_stack_value(sux_state, index, sux_value) {
1034         move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
1035       }
1036 
1037       for_each_local_value(sux_state, index, sux_value) {
1038         move_to_phi(&resolver, cur_state->local_at(index), sux_value);
1039       }
1040 
1041       assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
1042     }
1043   }
1044 }
1045 
1046 
1047 LIR_Opr LIRGenerator::new_register(BasicType type) {
1048   int vreg_num = _virtual_register_number;
1049   // Add a little fudge factor for the bailout since the bailout is only checked periodically. This allows us to hand out
1050   // a few extra registers before we really run out which helps to avoid to trip over assertions.
1051   if (vreg_num + 20 >= LIR_Opr::vreg_max) {
1052     bailout("out of virtual registers in LIR generator");
1053     if (vreg_num + 2 >= LIR_Opr::vreg_max) {
1054       // Wrap it around and continue until bailout really happens to avoid hitting assertions.
1055       _virtual_register_number = LIR_Opr::vreg_base;
1056       vreg_num = LIR_Opr::vreg_base;
1057     }
1058   }
1059   _virtual_register_number += 1;
1060   LIR_Opr vreg = LIR_OprFact::virtual_register(vreg_num, type);
1061   assert(vreg != LIR_OprFact::illegal(), "ran out of virtual registers");
1062   return vreg;
1063 }
1064 
1065 
1066 // Try to lock using register in hint
1067 LIR_Opr LIRGenerator::rlock(Value instr) {
1068   return new_register(instr->type());
1069 }
1070 
1071 
1072 // does an rlock and sets result
1073 LIR_Opr LIRGenerator::rlock_result(Value x) {
1074   LIR_Opr reg = rlock(x);
1075   set_result(x, reg);
1076   return reg;
1077 }
1078 
1079 
1080 // does an rlock and sets result
1081 LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
1082   LIR_Opr reg;
1083   switch (type) {
1084   case T_BYTE:
1085   case T_BOOLEAN:
1086     reg = rlock_byte(type);
1087     break;
1088   default:
1089     reg = rlock(x);
1090     break;
1091   }
1092 
1093   set_result(x, reg);
1094   return reg;
1095 }
1096 
1097 
1098 //---------------------------------------------------------------------
1099 ciObject* LIRGenerator::get_jobject_constant(Value value) {
1100   ObjectType* oc = value->type()->as_ObjectType();
1101   if (oc) {
1102     return oc->constant_value();
1103   }
1104   return nullptr;
1105 }
1106 
1107 
1108 void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
1109   assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
1110   assert(block()->next() == x, "ExceptionObject must be first instruction of block");
1111 
1112   // no moves are created for phi functions at the begin of exception
1113   // handlers, so assign operands manually here
1114   for_each_phi_fun(block(), phi,
1115                    if (!phi->is_illegal()) { operand_for_instruction(phi); });
1116 
1117   LIR_Opr thread_reg = getThreadPointer();
1118   __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
1119                exceptionOopOpr());
1120   __ move_wide(LIR_OprFact::oopConst(nullptr),
1121                new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
1122   __ move_wide(LIR_OprFact::oopConst(nullptr),
1123                new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));
1124 
1125   LIR_Opr result = new_register(T_OBJECT);
1126   __ move(exceptionOopOpr(), result);
1127   set_result(x, result);
1128 }
1129 
1130 
1131 //----------------------------------------------------------------------
1132 //----------------------------------------------------------------------
1133 //----------------------------------------------------------------------
1134 //----------------------------------------------------------------------
1135 //                        visitor functions
1136 //----------------------------------------------------------------------
1137 //----------------------------------------------------------------------
1138 //----------------------------------------------------------------------
1139 //----------------------------------------------------------------------
1140 
1141 void LIRGenerator::do_Phi(Phi* x) {
1142   // phi functions are never visited directly
1143   ShouldNotReachHere();
1144 }
1145 
1146 
1147 // Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
1148 void LIRGenerator::do_Constant(Constant* x) {
1149   if (x->state_before() != nullptr) {
1150     // Any constant with a ValueStack requires patching so emit the patch here
1151     LIR_Opr reg = rlock_result(x);
1152     CodeEmitInfo* info = state_for(x, x->state_before());
1153     __ oop2reg_patch(nullptr, reg, info);
1154   } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
1155     if (!x->is_pinned()) {
1156       // unpinned constants are handled specially so that they can be
1157       // put into registers when they are used multiple times within a
1158       // block.  After the block completes their operand will be
1159       // cleared so that other blocks can't refer to that register.
1160       set_result(x, load_constant(x));
1161     } else {
1162       LIR_Opr res = x->operand();
1163       if (!res->is_valid()) {
1164         res = LIR_OprFact::value_type(x->type());
1165       }
1166       if (res->is_constant()) {
1167         LIR_Opr reg = rlock_result(x);
1168         __ move(res, reg);
1169       } else {
1170         set_result(x, res);
1171       }
1172     }
1173   } else {
1174     set_result(x, LIR_OprFact::value_type(x->type()));
1175   }
1176 }
1177 
1178 
1179 void LIRGenerator::do_Local(Local* x) {
1180   // operand_for_instruction has the side effect of setting the result
1181   // so there's no need to do it here.
1182   operand_for_instruction(x);
1183 }
1184 
1185 
1186 void LIRGenerator::do_Return(Return* x) {
1187   if (compilation()->env()->dtrace_method_probes()) {
1188     BasicTypeList signature;
1189     signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
1190     signature.append(T_METADATA); // Method*
1191     LIR_OprList* args = new LIR_OprList();
1192     args->append(getThreadPointer());
1193     LIR_Opr meth = new_register(T_METADATA);
1194     __ metadata2reg(method()->constant_encoding(), meth);
1195     args->append(meth);
1196     call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, nullptr);
1197   }
1198 
1199   if (x->type()->is_void()) {
1200     __ return_op(LIR_OprFact::illegalOpr);
1201   } else {
1202     LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
1203     LIRItem result(x->result(), this);
1204 
1205     result.load_item_force(reg);
1206     __ return_op(result.result());
1207   }
1208   set_no_result(x);
1209 }
1210 
1211 // Example: ref.get()
1212 // Combination of LoadField and g1 pre-write barrier
1213 void LIRGenerator::do_Reference_get0(Intrinsic* x) {
1214 
1215   const int referent_offset = java_lang_ref_Reference::referent_offset();
1216 
1217   assert(x->number_of_arguments() == 1, "wrong type");
1218 
1219   LIRItem reference(x->argument_at(0), this);
1220   reference.load_item();
1221 
1222   // need to perform the null check on the reference object
1223   CodeEmitInfo* info = nullptr;
1224   if (x->needs_null_check()) {
1225     info = state_for(x);
1226   }
1227 
1228   LIR_Opr result = rlock_result(x, T_OBJECT);
1229   access_load_at(IN_HEAP | ON_WEAK_OOP_REF, T_OBJECT,
1230                  reference, LIR_OprFact::intConst(referent_offset), result,
1231                  nullptr, info);
1232 }
1233 
1234 // Example: clazz.isInstance(object)
1235 void LIRGenerator::do_isInstance(Intrinsic* x) {
1236   assert(x->number_of_arguments() == 2, "wrong type");
1237 
1238   LIRItem clazz(x->argument_at(0), this);
1239   LIRItem object(x->argument_at(1), this);
1240   clazz.load_item();
1241   object.load_item();
1242   LIR_Opr result = rlock_result(x);
1243 
1244   // need to perform null check on clazz
1245   if (x->needs_null_check()) {
1246     CodeEmitInfo* info = state_for(x);
1247     __ null_check(clazz.result(), info);
1248   }
1249 
1250   address pd_instanceof_fn = isInstance_entry();
1251   LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
1252                                      pd_instanceof_fn,
1253                                      x->type(),
1254                                      nullptr); // null CodeEmitInfo results in a leaf call
1255   __ move(call_result, result);
1256 }
1257 
1258 void LIRGenerator::load_klass(LIR_Opr obj, LIR_Opr klass, CodeEmitInfo* null_check_info) {
1259   __ load_klass(obj, klass, null_check_info);
1260 }
1261 
1262 // Example: object.getClass ()
1263 void LIRGenerator::do_getClass(Intrinsic* x) {
1264   assert(x->number_of_arguments() == 1, "wrong type");
1265 
1266   LIRItem rcvr(x->argument_at(0), this);
1267   rcvr.load_item();
1268   LIR_Opr temp = new_register(T_ADDRESS);
1269   LIR_Opr result = rlock_result(x);
1270 
1271   // need to perform the null check on the rcvr
1272   CodeEmitInfo* info = nullptr;
1273   if (x->needs_null_check()) {
1274     info = state_for(x);
1275   }
1276 
1277   LIR_Opr klass = new_register(T_METADATA);
1278   load_klass(rcvr.result(), klass, info);
1279   __ move_wide(new LIR_Address(klass, in_bytes(Klass::java_mirror_offset()), T_ADDRESS), temp);
1280   // mirror = ((OopHandle)mirror)->resolve();
1281   access_load(IN_NATIVE, T_OBJECT,
1282               LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), result);
1283 }
1284 
1285 void LIRGenerator::do_getObjectSize(Intrinsic* x) {
1286   assert(x->number_of_arguments() == 3, "wrong type");
1287   LIR_Opr result_reg = rlock_result(x);
1288 
1289   LIRItem value(x->argument_at(2), this);
1290   value.load_item();
1291 
1292   LIR_Opr klass = new_register(T_METADATA);
1293   load_klass(value.result(), klass, nullptr);
1294   LIR_Opr layout = new_register(T_INT);
1295   __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
1296 
1297   LabelObj* L_done = new LabelObj();
1298   LabelObj* L_array = new LabelObj();
1299 
1300   __ cmp(lir_cond_lessEqual, layout, 0);
1301   __ branch(lir_cond_lessEqual, L_array->label());
1302 
1303   // Instance case: the layout helper gives us instance size almost directly,
1304   // but we need to mask out the _lh_instance_slow_path_bit.
1305 
1306   assert((int) Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
1307 
1308   LIR_Opr mask = load_immediate(~(jint) right_n_bits(LogBytesPerLong), T_INT);
1309   __ logical_and(layout, mask, layout);
1310   __ convert(Bytecodes::_i2l, layout, result_reg);
1311 
1312   __ branch(lir_cond_always, L_done->label());
1313 
1314   // Array case: size is round(header + element_size*arraylength).
1315   // Since arraylength is different for every array instance, we have to
1316   // compute the whole thing at runtime.
1317 
1318   __ branch_destination(L_array->label());
1319 
1320   int round_mask = MinObjAlignmentInBytes - 1;
1321 
1322   // Figure out header sizes first.
1323   LIR_Opr hss = load_immediate(Klass::_lh_header_size_shift, T_INT);
1324   LIR_Opr hsm = load_immediate(Klass::_lh_header_size_mask, T_INT);
1325 
1326   LIR_Opr header_size = new_register(T_INT);
1327   __ move(layout, header_size);
1328   LIR_Opr tmp = new_register(T_INT);
1329   __ unsigned_shift_right(header_size, hss, header_size, tmp);
1330   __ logical_and(header_size, hsm, header_size);
1331   __ add(header_size, LIR_OprFact::intConst(round_mask), header_size);
1332 
1333   // Figure out the array length in bytes
1334   assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
1335   LIR_Opr l2esm = load_immediate(Klass::_lh_log2_element_size_mask, T_INT);
1336   __ logical_and(layout, l2esm, layout);
1337 
1338   LIR_Opr length_int = new_register(T_INT);
1339   __ move(new LIR_Address(value.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), length_int);
1340 
1341 #ifdef _LP64
1342   LIR_Opr length = new_register(T_LONG);
1343   __ convert(Bytecodes::_i2l, length_int, length);
1344 #endif
1345 
1346   // Shift-left awkwardness. Normally it is just:
1347   //   __ shift_left(length, layout, length);
1348   // But C1 cannot perform shift_left with non-constant count, so we end up
1349   // doing the per-bit loop dance here. x86_32 also does not know how to shift
1350   // longs, so we have to act on ints.
1351   LabelObj* L_shift_loop = new LabelObj();
1352   LabelObj* L_shift_exit = new LabelObj();
1353 
1354   __ branch_destination(L_shift_loop->label());
1355   __ cmp(lir_cond_equal, layout, 0);
1356   __ branch(lir_cond_equal, L_shift_exit->label());
1357 
1358 #ifdef _LP64
1359   __ shift_left(length, 1, length);
1360 #else
1361   __ shift_left(length_int, 1, length_int);
1362 #endif
1363 
1364   __ sub(layout, LIR_OprFact::intConst(1), layout);
1365 
1366   __ branch(lir_cond_always, L_shift_loop->label());
1367   __ branch_destination(L_shift_exit->label());
1368 
1369   // Mix all up, round, and push to the result.
1370 #ifdef _LP64
1371   LIR_Opr header_size_long = new_register(T_LONG);
1372   __ convert(Bytecodes::_i2l, header_size, header_size_long);
1373   __ add(length, header_size_long, length);
1374   if (round_mask != 0) {
1375     LIR_Opr round_mask_opr = load_immediate(~(jlong)round_mask, T_LONG);
1376     __ logical_and(length, round_mask_opr, length);
1377   }
1378   __ move(length, result_reg);
1379 #else
1380   __ add(length_int, header_size, length_int);
1381   if (round_mask != 0) {
1382     LIR_Opr round_mask_opr = load_immediate(~round_mask, T_INT);
1383     __ logical_and(length_int, round_mask_opr, length_int);
1384   }
1385   __ convert(Bytecodes::_i2l, length_int, result_reg);
1386 #endif
1387 
1388   __ branch_destination(L_done->label());
1389 }
1390 
1391 void LIRGenerator::do_scopedValueCache(Intrinsic* x) {
1392   do_JavaThreadField(x, JavaThread::scopedValueCache_offset());
1393 }
1394 
1395 // Example: Thread.currentCarrierThread()
1396 void LIRGenerator::do_currentCarrierThread(Intrinsic* x) {
1397   do_JavaThreadField(x, JavaThread::threadObj_offset());
1398 }
1399 
1400 void LIRGenerator::do_vthread(Intrinsic* x) {
1401   do_JavaThreadField(x, JavaThread::vthread_offset());
1402 }
1403 
1404 void LIRGenerator::do_JavaThreadField(Intrinsic* x, ByteSize offset) {
1405   assert(x->number_of_arguments() == 0, "wrong type");
1406   LIR_Opr temp = new_register(T_ADDRESS);
1407   LIR_Opr reg = rlock_result(x);
1408   __ move(new LIR_Address(getThreadPointer(), in_bytes(offset), T_ADDRESS), temp);
1409   access_load(IN_NATIVE, T_OBJECT,
1410               LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), reg);
1411 }
1412 
1413 void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
1414   assert(x->number_of_arguments() == 1, "wrong type");
1415   LIRItem receiver(x->argument_at(0), this);
1416 
1417   receiver.load_item();
1418   BasicTypeList signature;
1419   signature.append(T_OBJECT); // receiver
1420   LIR_OprList* args = new LIR_OprList();
1421   args->append(receiver.result());
1422   CodeEmitInfo* info = state_for(x, x->state());
1423   call_runtime(&signature, args,
1424                CAST_FROM_FN_PTR(address, Runtime1::entry_for(StubId::c1_register_finalizer_id)),
1425                voidType, info);
1426 
1427   set_no_result(x);
1428 }
1429 
1430 
1431 //------------------------local access--------------------------------------
1432 
1433 LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
1434   if (x->operand()->is_illegal()) {
1435     Constant* c = x->as_Constant();
1436     if (c != nullptr) {
1437       x->set_operand(LIR_OprFact::value_type(c->type()));
1438     } else {
1439       assert(x->as_Phi() || x->as_Local() != nullptr, "only for Phi and Local");
1440       // allocate a virtual register for this local or phi
1441       x->set_operand(rlock(x));
1442 #ifdef ASSERT
1443       _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, nullptr);
1444 #endif
1445     }
1446   }
1447   return x->operand();
1448 }
1449 
1450 #ifdef ASSERT
1451 Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
1452   if (reg_num < _instruction_for_operand.length()) {
1453     return _instruction_for_operand.at(reg_num);
1454   }
1455   return nullptr;
1456 }
1457 #endif
1458 
1459 void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
1460   if (_vreg_flags.size_in_bits() == 0) {
1461     BitMap2D temp(100, num_vreg_flags);
1462     _vreg_flags = temp;
1463   }
1464   _vreg_flags.at_put_grow(vreg_num, f, true);
1465 }
1466 
1467 bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
1468   if (!_vreg_flags.is_valid_index(vreg_num, f)) {
1469     return false;
1470   }
1471   return _vreg_flags.at(vreg_num, f);
1472 }
1473 
1474 
1475 // Block local constant handling.  This code is useful for keeping
1476 // unpinned constants and constants which aren't exposed in the IR in
1477 // registers.  Unpinned Constant instructions have their operands
1478 // cleared when the block is finished so that other blocks can't end
1479 // up referring to their registers.
1480 
1481 LIR_Opr LIRGenerator::load_constant(Constant* x) {
1482   assert(!x->is_pinned(), "only for unpinned constants");
1483   _unpinned_constants.append(x);
1484   return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
1485 }
1486 
1487 
1488 LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
1489   BasicType t = c->type();
1490   for (int i = 0; i < _constants.length() && !in_conditional_code(); i++) {
1491     LIR_Const* other = _constants.at(i);
1492     if (t == other->type()) {
1493       switch (t) {
1494       case T_INT:
1495       case T_FLOAT:
1496         if (c->as_jint_bits() != other->as_jint_bits()) continue;
1497         break;
1498       case T_LONG:
1499       case T_DOUBLE:
1500         if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
1501         if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
1502         break;
1503       case T_OBJECT:
1504         if (c->as_jobject() != other->as_jobject()) continue;
1505         break;
1506       default:
1507         break;
1508       }
1509       return _reg_for_constants.at(i);
1510     }
1511   }
1512 
1513   LIR_Opr result = new_register(t);
1514   __ move((LIR_Opr)c, result);
1515   if (!in_conditional_code()) {
1516     _constants.append(c);
1517     _reg_for_constants.append(result);
1518   }
1519   return result;
1520 }
1521 
1522 void LIRGenerator::set_in_conditional_code(bool v) {
1523   assert(v != _in_conditional_code, "must change state");
1524   _in_conditional_code = v;
1525 }
1526 
1527 
1528 //------------------------field access--------------------------------------
1529 
1530 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
1531   assert(x->number_of_arguments() == 4, "wrong type");
1532   LIRItem obj   (x->argument_at(0), this);  // object
1533   LIRItem offset(x->argument_at(1), this);  // offset of field
1534   LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
1535   LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
1536   assert(obj.type()->tag() == objectTag, "invalid type");
1537   assert(cmp.type()->tag() == type->tag(), "invalid type");
1538   assert(val.type()->tag() == type->tag(), "invalid type");
1539 
1540   LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
1541                                             obj, offset, cmp, val);
1542   set_result(x, result);
1543 }
1544 
1545 // Returns a int/long value with the null marker bit set
1546 static LIR_Opr null_marker_mask(BasicType bt, ciField* field) {
1547   assert(field->null_marker_offset() != -1, "field does not have null marker");
1548   int nm_offset = field->null_marker_offset() - field->offset_in_bytes();
1549   jlong null_marker = 1ULL << (nm_offset << LogBitsPerByte);
1550   return (bt == T_LONG) ? LIR_OprFact::longConst(null_marker) : LIR_OprFact::intConst(null_marker);
1551 }
1552 
1553 // Comment copied form templateTable_i486.cpp
1554 // ----------------------------------------------------------------------------
1555 // Volatile variables demand their effects be made known to all CPU's in
1556 // order.  Store buffers on most chips allow reads & writes to reorder; the
1557 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1558 // memory barrier (i.e., it's not sufficient that the interpreter does not
1559 // reorder volatile references, the hardware also must not reorder them).
1560 //
1561 // According to the new Java Memory Model (JMM):
1562 // (1) All volatiles are serialized wrt to each other.
1563 // ALSO reads & writes act as acquire & release, so:
1564 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1565 // the read float up to before the read.  It's OK for non-volatile memory refs
1566 // that happen before the volatile read to float down below it.
1567 // (3) Similar a volatile write cannot let unrelated NON-volatile memory refs
1568 // that happen BEFORE the write float down to after the write.  It's OK for
1569 // non-volatile memory refs that happen after the volatile write to float up
1570 // before it.
1571 //
1572 // We only put in barriers around volatile refs (they are expensive), not
1573 // _between_ memory refs (that would require us to track the flavor of the
1574 // previous memory refs).  Requirements (2) and (3) require some barriers
1575 // before volatile stores and after volatile loads.  These nearly cover
1576 // requirement (1) but miss the volatile-store-volatile-load case.  This final
1577 // case is placed after volatile-stores although it could just as well go
1578 // before volatile-loads.
1579 
1580 
1581 void LIRGenerator::do_StoreField(StoreField* x) {
1582   ciField* field = x->field();
1583   bool needs_patching = x->needs_patching();
1584   bool is_volatile = field->is_volatile();
1585   BasicType field_type = x->field_type();
1586 
1587   CodeEmitInfo* info = nullptr;
1588   if (needs_patching) {
1589     assert(x->explicit_null_check() == nullptr, "can't fold null check into patching field access");
1590     info = state_for(x, x->state_before());
1591   } else if (x->needs_null_check()) {
1592     NullCheck* nc = x->explicit_null_check();
1593     if (nc == nullptr) {
1594       info = state_for(x);
1595     } else {
1596       info = state_for(nc);
1597     }
1598   }
1599 
1600   LIRItem object(x->obj(), this);
1601   LIRItem value(x->value(),  this);
1602 
1603   object.load_item();
1604 
1605   if (field->is_flat()) {
1606     value.load_item();
1607   } else {
1608     if (is_volatile || needs_patching) {
1609       // load item if field is volatile (fewer special cases for volatiles)
1610       // load item if field not initialized
1611       // load item if field not constant
1612       // because of code patching we cannot inline constants
1613       if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1614         value.load_byte_item();
1615       } else  {
1616         value.load_item();
1617       }
1618     } else {
1619       value.load_for_store(field_type);
1620     }
1621   }
1622 
1623   set_no_result(x);
1624 
1625 #ifndef PRODUCT
1626   if (PrintNotLoaded && needs_patching) {
1627     tty->print_cr("   ###class not loaded at store_%s bci %d",
1628                   x->is_static() ?  "static" : "field", x->printable_bci());
1629   }
1630 #endif
1631 
1632   if (x->needs_null_check() &&
1633       (needs_patching ||
1634        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1635     // Emit an explicit null check because the offset is too large.
1636     // If the class is not loaded and the object is null, we need to deoptimize to throw a
1637     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1638     __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1639   }
1640 
1641   DecoratorSet decorators = IN_HEAP;
1642   if (is_volatile) {
1643     decorators |= MO_SEQ_CST;
1644   }
1645   if (needs_patching) {
1646     decorators |= C1_NEEDS_PATCHING;
1647   }
1648 
1649   if (field->is_flat()) {
1650     ciInlineKlass* vk = field->type()->as_inline_klass();
1651 
1652 #ifdef ASSERT
1653     assert(field->is_atomic(), "No atomic access required %s.%s", field->holder()->name()->as_utf8(), field->name()->as_utf8());
1654     // ZGC does not support compressed oops, so only one oop can be in the payload which is written by a "normal" oop store.
1655     assert(!vk->contains_oops() || !UseZGC, "ZGC does not support embedded oops in flat fields");
1656 #endif
1657 
1658     // Zero the payload
1659     BasicType bt = vk->atomic_size_to_basic_type(field->is_null_free());
1660     LIR_Opr payload = new_register((bt == T_LONG) ? bt : T_INT);
1661     LIR_Opr zero = (bt == T_LONG) ? LIR_OprFact::longConst(0) : LIR_OprFact::intConst(0);
1662     __ move(zero, payload);
1663 
1664     bool is_constant_null = value.is_constant() && value.value()->is_null_obj();
1665     if (!is_constant_null) {
1666       LabelObj* L_isNull = new LabelObj();
1667       bool needs_null_check = !value.is_constant() || value.value()->is_null_obj();
1668       if (needs_null_check) {
1669         __ cmp(lir_cond_equal, value.result(), LIR_OprFact::oopConst(nullptr));
1670         __ branch(lir_cond_equal, L_isNull->label());
1671       }
1672       // Load payload (if not empty) and set null marker (if not null-free)
1673       if (!vk->is_empty()) {
1674         access_load_at(decorators, bt, value, LIR_OprFact::intConst(vk->payload_offset()), payload);
1675       }
1676       if (!field->is_null_free()) {
1677         __ logical_or(payload, null_marker_mask(bt, field), payload);
1678       }
1679       if (needs_null_check) {
1680         __ branch_destination(L_isNull->label());
1681       }
1682     }
1683     access_store_at(decorators, bt, object, LIR_OprFact::intConst(x->offset()), payload,
1684                     // Make sure to emit an implicit null check and pass the information
1685                     // that this is a flat store that might require gc barriers for oop fields.
1686                     info != nullptr ? new CodeEmitInfo(info) : nullptr, info, vk);
1687     return;
1688   }
1689 
1690   access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
1691                   value.result(), info != nullptr ? new CodeEmitInfo(info) : nullptr, info);
1692 }
1693 
1694 // TODO 8350865 Can we find another way to pass an address to access_load_at()?
1695 class TempResolvedAddress: public Instruction {
1696  public:
1697   TempResolvedAddress(ValueType* type, LIR_Opr addr) : Instruction(type) {
1698     set_operand(addr);
1699   }
1700   virtual void input_values_do(ValueVisitor*) {}
1701   virtual void visit(InstructionVisitor* v)   {}
1702   virtual const char* name() const  { return "TempResolvedAddress"; }
1703 };
1704 
1705 LIR_Opr LIRGenerator::get_and_load_element_address(LIRItem& array, LIRItem& index) {
1706 #ifndef _LP64
1707   // We need to be careful with overflows in 32-bit arithmetic
1708   Unimplemented();
1709 #endif
1710   ciType* array_type = array.value()->declared_type();
1711   ciFlatArrayKlass* flat_array_klass = array_type->as_flat_array_klass();
1712   assert(flat_array_klass->is_loaded(), "must be");
1713 
1714   int array_header_size = flat_array_klass->array_header_in_bytes();
1715   int shift = flat_array_klass->log2_element_size();
1716 
1717   LIR_Opr index_op = new_register(T_LONG);
1718   if (index.result()->is_constant()) {
1719     jint const_index = index.result()->as_jint();
1720     __ move(LIR_OprFact::longConst(const_index << shift), index_op);
1721   } else {
1722     __ convert(Bytecodes::_i2l, index.result(), index_op);
1723     // Need to shift manually, as LIR_Address can scale only up to 3.
1724     __ shift_left(index_op, shift, index_op);
1725   }
1726 
1727   LIR_Opr elm_op = new_pointer_register();
1728   LIR_Address* elm_address = generate_address(array.result(), index_op, 0, array_header_size, T_ADDRESS);
1729   __ leal(LIR_OprFact::address(elm_address), elm_op);
1730   return elm_op;
1731 }
1732 
1733 void LIRGenerator::access_sub_element(LIRItem& array, LIRItem& index, LIR_Opr& result, ciField* field, size_t sub_offset) {
1734   assert(field != nullptr, "Need a subelement type specified");
1735 
1736   // Find the starting address of the source (inside the array)
1737   LIR_Opr elm_op = get_and_load_element_address(array, index);
1738 
1739   BasicType subelt_type = field->type()->basic_type();
1740   TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(subelt_type), elm_op);
1741   LIRItem elm_item(elm_resolved_addr, this);
1742 
1743   DecoratorSet decorators = IN_HEAP;
1744   access_load_at(decorators, subelt_type,
1745                      elm_item, LIR_OprFact::longConst(sub_offset), result,
1746                      nullptr, nullptr);
1747 }
1748 
1749 void LIRGenerator::access_flat_array(bool is_load, LIRItem& array, LIRItem& index, LIRItem& obj_item,
1750                                           ciField* field, size_t sub_offset) {
1751   assert(sub_offset == 0 || field != nullptr, "Sanity check");
1752 
1753   // Find the starting address of the source (inside the array)
1754   LIR_Opr elm_op = get_and_load_element_address(array, index);
1755 
1756   ciInlineKlass* elem_klass = nullptr;
1757   if (field != nullptr) {
1758     elem_klass = field->type()->as_inline_klass();
1759   } else {
1760     elem_klass = array.value()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass();
1761   }
1762   for (int i = 0; i < elem_klass->nof_nonstatic_fields(); i++) {
1763     ciField* inner_field = elem_klass->nonstatic_field_at(i);
1764     assert(!inner_field->is_flat(), "flat fields must have been expanded");
1765     int obj_offset = inner_field->offset_in_bytes();
1766     size_t elm_offset = obj_offset - elem_klass->payload_offset() + sub_offset; // object header is not stored in array.
1767     BasicType field_type = inner_field->type()->basic_type();
1768 
1769     // Types which are smaller than int are still passed in an int register.
1770     BasicType reg_type = field_type;
1771     switch (reg_type) {
1772     case T_BYTE:
1773     case T_BOOLEAN:
1774     case T_SHORT:
1775     case T_CHAR:
1776       reg_type = T_INT;
1777       break;
1778     default:
1779       break;
1780     }
1781 
1782     LIR_Opr temp = new_register(reg_type);
1783     TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(field_type), elm_op);
1784     LIRItem elm_item(elm_resolved_addr, this);
1785 
1786     DecoratorSet decorators = IN_HEAP;
1787     if (is_load) {
1788       access_load_at(decorators, field_type,
1789                      elm_item, LIR_OprFact::longConst(elm_offset), temp,
1790                      nullptr, nullptr);
1791       access_store_at(decorators, field_type,
1792                       obj_item, LIR_OprFact::intConst(obj_offset), temp,
1793                       nullptr, nullptr);
1794     } else {
1795       access_load_at(decorators, field_type,
1796                      obj_item, LIR_OprFact::intConst(obj_offset), temp,
1797                      nullptr, nullptr);
1798       access_store_at(decorators, field_type,
1799                       elm_item, LIR_OprFact::longConst(elm_offset), temp,
1800                       nullptr, nullptr);
1801     }
1802   }
1803 }
1804 
1805 void LIRGenerator::check_flat_array(LIR_Opr array, LIR_Opr value, CodeStub* slow_path) {
1806   LIR_Opr tmp = new_register(T_METADATA);
1807   __ check_flat_array(array, value, tmp, slow_path);
1808 }
1809 
1810 void LIRGenerator::check_null_free_array(LIRItem& array, LIRItem& value, CodeEmitInfo* info) {
1811   LabelObj* L_end = new LabelObj();
1812   LIR_Opr tmp = new_register(T_METADATA);
1813   __ check_null_free_array(array.result(), tmp);
1814   __ branch(lir_cond_equal, L_end->label());
1815   __ null_check(value.result(), info);
1816   __ branch_destination(L_end->label());
1817 }
1818 
1819 bool LIRGenerator::needs_flat_array_store_check(StoreIndexed* x) {
1820   if (x->elt_type() == T_OBJECT && x->array()->maybe_flat_array()) {
1821     ciType* type = x->value()->declared_type();
1822     if (type != nullptr && type->is_klass()) {
1823       ciKlass* klass = type->as_klass();
1824       if (!klass->can_be_inline_klass() || (klass->is_inlinetype() && !klass->as_inline_klass()->maybe_flat_in_array())) {
1825         // This is known to be a non-flat object. If the array is a flat array,
1826         // it will be caught by the code generated by array_store_check().
1827         return false;
1828       }
1829     }
1830     // We're not 100% sure, so let's do the flat_array_store_check.
1831     return true;
1832   }
1833   return false;
1834 }
1835 
1836 bool LIRGenerator::needs_null_free_array_store_check(StoreIndexed* x) {
1837   return x->elt_type() == T_OBJECT && x->array()->maybe_null_free_array();
1838 }
1839 
1840 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1841   assert(x->is_pinned(),"");
1842   assert(x->elt_type() != T_ARRAY, "never used");
1843   bool is_loaded_flat_array = x->array()->is_loaded_flat_array();
1844   bool needs_range_check = x->compute_needs_range_check();
1845   bool use_length = x->length() != nullptr;
1846   bool obj_store = is_reference_type(x->elt_type());
1847   bool needs_store_check = obj_store && !(is_loaded_flat_array && x->is_exact_flat_array_store()) &&
1848                                         (x->value()->as_Constant() == nullptr ||
1849                                          !get_jobject_constant(x->value())->is_null_object());
1850 
1851   LIRItem array(x->array(), this);
1852   LIRItem index(x->index(), this);
1853   LIRItem value(x->value(), this);
1854   LIRItem length(this);
1855 
1856   array.load_item();
1857   index.load_nonconstant();
1858 
1859   if (use_length && needs_range_check) {
1860     length.set_instruction(x->length());
1861     length.load_item();
1862   }
1863 
1864   if (needs_store_check || x->check_boolean()
1865       || is_loaded_flat_array || needs_flat_array_store_check(x) || needs_null_free_array_store_check(x)) {
1866     value.load_item();
1867   } else {
1868     value.load_for_store(x->elt_type());
1869   }
1870 
1871   set_no_result(x);
1872 
1873   // the CodeEmitInfo must be duplicated for each different
1874   // LIR-instruction because spilling can occur anywhere between two
1875   // instructions and so the debug information must be different
1876   CodeEmitInfo* range_check_info = state_for(x);
1877   CodeEmitInfo* null_check_info = nullptr;
1878   if (x->needs_null_check()) {
1879     null_check_info = new CodeEmitInfo(range_check_info);
1880   }
1881 
1882   if (needs_range_check) {
1883     if (use_length) {
1884       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1885       __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
1886     } else {
1887       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1888       // range_check also does the null check
1889       null_check_info = nullptr;
1890     }
1891   }
1892 
1893   if (GenerateArrayStoreCheck && needs_store_check) {
1894     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1895     array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1896   }
1897 
1898   if (x->should_profile()) {
1899     if (is_loaded_flat_array) {
1900       // No need to profile a store to a flat array of known type. This can happen if
1901       // the type only became known after optimizations (for example, after the PhiSimplifier).
1902       x->set_should_profile(false);
1903     } else {
1904       int bci = x->profiled_bci();
1905       ciMethodData* md = x->profiled_method()->method_data();
1906       assert(md != nullptr, "Sanity");
1907       ciProfileData* data = md->bci_to_data(bci);
1908       assert(data != nullptr && data->is_ArrayStoreData(), "incorrect profiling entry");
1909       ciArrayStoreData* store_data = (ciArrayStoreData*)data;
1910       profile_array_type(x, md, store_data);
1911       assert(store_data->is_ArrayStoreData(), "incorrect profiling entry");
1912       if (x->array()->maybe_null_free_array()) {
1913         profile_null_free_array(array, md, data);
1914       }
1915     }
1916   }
1917 
1918   if (is_loaded_flat_array) {
1919     // TODO 8350865 This is currently dead code and still assumes that flat arrays are null-free
1920     if (!x->value()->is_null_free()) {
1921       __ null_check(value.result(), new CodeEmitInfo(range_check_info));
1922     }
1923     // If array element is an empty inline type, no need to copy anything
1924     if (!x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass()->is_empty()) {
1925       access_flat_array(false, array, index, value);
1926     }
1927   } else {
1928     StoreFlattenedArrayStub* slow_path = nullptr;
1929 
1930     if (needs_flat_array_store_check(x)) {
1931       // Check if we indeed have a flat array
1932       index.load_item();
1933       slow_path = new StoreFlattenedArrayStub(array.result(), index.result(), value.result(), state_for(x, x->state_before()));
1934       check_flat_array(array.result(), value.result(), slow_path);
1935       set_in_conditional_code(true);
1936     } else if (needs_null_free_array_store_check(x)) {
1937       CodeEmitInfo* info = new CodeEmitInfo(range_check_info);
1938       check_null_free_array(array, value, info);
1939     }
1940 
1941     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1942     if (x->check_boolean()) {
1943       decorators |= C1_MASK_BOOLEAN;
1944     }
1945 
1946     access_store_at(decorators, x->elt_type(), array, index.result(), value.result(), nullptr, null_check_info);
1947     if (slow_path != nullptr) {
1948       __ branch_destination(slow_path->continuation());
1949       set_in_conditional_code(false);
1950     }
1951   }
1952 }
1953 
1954 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
1955                                   LIRItem& base, LIR_Opr offset, LIR_Opr result,
1956                                   CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
1957   decorators |= ACCESS_READ;
1958   LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
1959   if (access.is_raw()) {
1960     _barrier_set->BarrierSetC1::load_at(access, result);
1961   } else {
1962     _barrier_set->load_at(access, result);
1963   }
1964 }
1965 
1966 void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
1967                                LIR_Opr addr, LIR_Opr result) {
1968   decorators |= ACCESS_READ;
1969   LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
1970   access.set_resolved_addr(addr);
1971   if (access.is_raw()) {
1972     _barrier_set->BarrierSetC1::load(access, result);
1973   } else {
1974     _barrier_set->load(access, result);
1975   }
1976 }
1977 
1978 void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type,
1979                                    LIRItem& base, LIR_Opr offset, LIR_Opr value,
1980                                    CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info,
1981                                    ciInlineKlass* vk) {
1982   decorators |= ACCESS_WRITE;
1983   LIRAccess access(this, decorators, base, offset, type, patch_info, store_emit_info, vk);
1984   if (access.is_raw()) {
1985     _barrier_set->BarrierSetC1::store_at(access, value);
1986   } else {
1987     _barrier_set->store_at(access, value);
1988   }
1989 }
1990 
1991 LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
1992                                                LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
1993   decorators |= ACCESS_READ;
1994   decorators |= ACCESS_WRITE;
1995   // Atomic operations are SEQ_CST by default
1996   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
1997   LIRAccess access(this, decorators, base, offset, type);
1998   if (access.is_raw()) {
1999     return _barrier_set->BarrierSetC1::atomic_cmpxchg_at(access, cmp_value, new_value);
2000   } else {
2001     return _barrier_set->atomic_cmpxchg_at(access, cmp_value, new_value);
2002   }
2003 }
2004 
2005 LIR_Opr LIRGenerator::access_atomic_xchg_at(DecoratorSet decorators, BasicType type,
2006                                             LIRItem& base, LIRItem& offset, LIRItem& value) {
2007   decorators |= ACCESS_READ;
2008   decorators |= ACCESS_WRITE;
2009   // Atomic operations are SEQ_CST by default
2010   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
2011   LIRAccess access(this, decorators, base, offset, type);
2012   if (access.is_raw()) {
2013     return _barrier_set->BarrierSetC1::atomic_xchg_at(access, value);
2014   } else {
2015     return _barrier_set->atomic_xchg_at(access, value);
2016   }
2017 }
2018 
2019 LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type,
2020                                            LIRItem& base, LIRItem& offset, LIRItem& value) {
2021   decorators |= ACCESS_READ;
2022   decorators |= ACCESS_WRITE;
2023   // Atomic operations are SEQ_CST by default
2024   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
2025   LIRAccess access(this, decorators, base, offset, type);
2026   if (access.is_raw()) {
2027     return _barrier_set->BarrierSetC1::atomic_add_at(access, value);
2028   } else {
2029     return _barrier_set->atomic_add_at(access, value);
2030   }
2031 }
2032 
2033 void LIRGenerator::do_LoadField(LoadField* x) {
2034   ciField* field = x->field();
2035   bool needs_patching = x->needs_patching();
2036   bool is_volatile = field->is_volatile();
2037   BasicType field_type = x->field_type();
2038 
2039   CodeEmitInfo* info = nullptr;
2040   if (needs_patching) {
2041     assert(x->explicit_null_check() == nullptr, "can't fold null check into patching field access");
2042     info = state_for(x, x->state_before());
2043   } else if (x->needs_null_check()) {
2044     NullCheck* nc = x->explicit_null_check();
2045     if (nc == nullptr) {
2046       info = state_for(x);
2047     } else {
2048       info = state_for(nc);
2049     }
2050   }
2051 
2052   LIRItem object(x->obj(), this);
2053 
2054   object.load_item();
2055 
2056 #ifndef PRODUCT
2057   if (PrintNotLoaded && needs_patching) {
2058     tty->print_cr("   ###class not loaded at load_%s bci %d",
2059                   x->is_static() ?  "static" : "field", x->printable_bci());
2060   }
2061 #endif
2062 
2063   bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
2064   if (x->needs_null_check() &&
2065       (needs_patching ||
2066        MacroAssembler::needs_explicit_null_check(x->offset()) ||
2067        stress_deopt)) {
2068     LIR_Opr obj = object.result();
2069     if (stress_deopt) {
2070       obj = new_register(T_OBJECT);
2071       __ move(LIR_OprFact::oopConst(nullptr), obj);
2072     }
2073     // Emit an explicit null check because the offset is too large.
2074     // If the class is not loaded and the object is null, we need to deoptimize to throw a
2075     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
2076     __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
2077   }
2078 
2079   DecoratorSet decorators = IN_HEAP;
2080   if (is_volatile) {
2081     decorators |= MO_SEQ_CST;
2082   }
2083   if (needs_patching) {
2084     decorators |= C1_NEEDS_PATCHING;
2085   }
2086 
2087   if (field->is_flat()) {
2088     ciInlineKlass* vk = field->type()->as_inline_klass();
2089 #ifdef ASSERT
2090     assert(field->is_atomic(), "No atomic access required");
2091     assert(x->state_before() != nullptr, "Needs state before");
2092 #endif
2093 
2094     // Allocate buffer (we can't easily do this conditionally on the null check below
2095     // because branches added in the LIR are opaque to the register allocator).
2096     NewInstance* buffer = new NewInstance(vk, x->state_before(), false, true);
2097     do_NewInstance(buffer);
2098     LIRItem dest(buffer, this);
2099 
2100     // Copy the payload to the buffer
2101     BasicType bt = vk->atomic_size_to_basic_type(field->is_null_free());
2102     LIR_Opr payload = new_register((bt == T_LONG) ? bt : T_INT);
2103     access_load_at(decorators, bt, object, LIR_OprFact::intConst(field->offset_in_bytes()), payload,
2104                    // Make sure to emit an implicit null check
2105                    info ? new CodeEmitInfo(info) : nullptr, info);
2106     access_store_at(decorators, bt, dest, LIR_OprFact::intConst(vk->payload_offset()), payload);
2107 
2108     if (field->is_null_free()) {
2109       set_result(x, buffer->operand());
2110     } else {
2111       // Check the null marker and set result to null if it's not set
2112       __ logical_and(payload, null_marker_mask(bt, field), payload);
2113       __ cmp(lir_cond_equal, payload, (bt == T_LONG) ? LIR_OprFact::longConst(0) : LIR_OprFact::intConst(0));
2114       __ cmove(lir_cond_equal, LIR_OprFact::oopConst(nullptr), buffer->operand(), rlock_result(x), T_OBJECT);
2115     }
2116 
2117     // Ensure the copy is visible before any subsequent store that publishes the buffer.
2118     __ membar_storestore();
2119     return;
2120   }
2121 
2122   LIR_Opr result = rlock_result(x, field_type);
2123   access_load_at(decorators, field_type,
2124                  object, LIR_OprFact::intConst(x->offset()), result,
2125                  info ? new CodeEmitInfo(info) : nullptr, info);
2126 }
2127 
2128 // int/long jdk.internal.util.Preconditions.checkIndex
2129 void LIRGenerator::do_PreconditionsCheckIndex(Intrinsic* x, BasicType type) {
2130   assert(x->number_of_arguments() == 3, "wrong type");
2131   LIRItem index(x->argument_at(0), this);
2132   LIRItem length(x->argument_at(1), this);
2133   LIRItem oobef(x->argument_at(2), this);
2134 
2135   index.load_item();
2136   length.load_item();
2137   oobef.load_item();
2138 
2139   LIR_Opr result = rlock_result(x);
2140   // x->state() is created from copy_state_for_exception, it does not contains arguments
2141   // we should prepare them before entering into interpreter mode due to deoptimization.
2142   ValueStack* state = x->state();
2143   for (int i = 0; i < x->number_of_arguments(); i++) {
2144     Value arg = x->argument_at(i);
2145     state->push(arg->type(), arg);
2146   }
2147   CodeEmitInfo* info = state_for(x, state);
2148 
2149   LIR_Opr len = length.result();
2150   LIR_Opr zero;
2151   if (type == T_INT) {
2152     zero = LIR_OprFact::intConst(0);
2153     if (length.result()->is_constant()){
2154       len = LIR_OprFact::intConst(length.result()->as_jint());
2155     }
2156   } else {
2157     assert(type == T_LONG, "sanity check");
2158     zero = LIR_OprFact::longConst(0);
2159     if (length.result()->is_constant()){
2160       len = LIR_OprFact::longConst(length.result()->as_jlong());
2161     }
2162   }
2163   // C1 can not handle the case that comparing index with constant value while condition
2164   // is neither lir_cond_equal nor lir_cond_notEqual, see LIR_Assembler::comp_op.
2165   LIR_Opr zero_reg = new_register(type);
2166   __ move(zero, zero_reg);
2167 #if defined(X86) && !defined(_LP64)
2168   // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
2169   LIR_Opr index_copy = new_register(index.type());
2170   // index >= 0
2171   __ move(index.result(), index_copy);
2172   __ cmp(lir_cond_less, index_copy, zero_reg);
2173   __ branch(lir_cond_less, new DeoptimizeStub(info, Deoptimization::Reason_range_check,
2174                                                     Deoptimization::Action_make_not_entrant));
2175   // index < length
2176   __ move(index.result(), index_copy);
2177   __ cmp(lir_cond_greaterEqual, index_copy, len);
2178   __ branch(lir_cond_greaterEqual, new DeoptimizeStub(info, Deoptimization::Reason_range_check,
2179                                                             Deoptimization::Action_make_not_entrant));
2180 #else
2181   // index >= 0
2182   __ cmp(lir_cond_less, index.result(), zero_reg);
2183   __ branch(lir_cond_less, new DeoptimizeStub(info, Deoptimization::Reason_range_check,
2184                                                     Deoptimization::Action_make_not_entrant));
2185   // index < length
2186   __ cmp(lir_cond_greaterEqual, index.result(), len);
2187   __ branch(lir_cond_greaterEqual, new DeoptimizeStub(info, Deoptimization::Reason_range_check,
2188                                                             Deoptimization::Action_make_not_entrant));
2189 #endif
2190   __ move(index.result(), result);
2191 }
2192 
2193 //------------------------array access--------------------------------------
2194 
2195 
2196 void LIRGenerator::do_ArrayLength(ArrayLength* x) {
2197   LIRItem array(x->array(), this);
2198   array.load_item();
2199   LIR_Opr reg = rlock_result(x);
2200 
2201   CodeEmitInfo* info = nullptr;
2202   if (x->needs_null_check()) {
2203     NullCheck* nc = x->explicit_null_check();
2204     if (nc == nullptr) {
2205       info = state_for(x);
2206     } else {
2207       info = state_for(nc);
2208     }
2209     if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
2210       LIR_Opr obj = new_register(T_OBJECT);
2211       __ move(LIR_OprFact::oopConst(nullptr), obj);
2212       __ null_check(obj, new CodeEmitInfo(info));
2213     }
2214   }
2215   __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
2216 }
2217 
2218 
2219 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
2220   bool use_length = x->length() != nullptr;
2221   LIRItem array(x->array(), this);
2222   LIRItem index(x->index(), this);
2223   LIRItem length(this);
2224   bool needs_range_check = x->compute_needs_range_check();
2225 
2226   if (use_length && needs_range_check) {
2227     length.set_instruction(x->length());
2228     length.load_item();
2229   }
2230 
2231   array.load_item();
2232   if (index.is_constant() && can_inline_as_constant(x->index())) {
2233     // let it be a constant
2234     index.dont_load_item();
2235   } else {
2236     index.load_item();
2237   }
2238 
2239   CodeEmitInfo* range_check_info = state_for(x);
2240   CodeEmitInfo* null_check_info = nullptr;
2241   if (x->needs_null_check()) {
2242     NullCheck* nc = x->explicit_null_check();
2243     if (nc != nullptr) {
2244       null_check_info = state_for(nc);
2245     } else {
2246       null_check_info = range_check_info;
2247     }
2248     if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
2249       LIR_Opr obj = new_register(T_OBJECT);
2250       __ move(LIR_OprFact::oopConst(nullptr), obj);
2251       __ null_check(obj, new CodeEmitInfo(null_check_info));
2252     }
2253   }
2254 
2255   if (needs_range_check) {
2256     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
2257       __ branch(lir_cond_always, new RangeCheckStub(range_check_info, index.result(), array.result()));
2258     } else if (use_length) {
2259       // TODO: use a (modified) version of array_range_check that does not require a
2260       //       constant length to be loaded to a register
2261       __ cmp(lir_cond_belowEqual, length.result(), index.result());
2262       __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
2263     } else {
2264       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
2265       // The range check performs the null check, so clear it out for the load
2266       null_check_info = nullptr;
2267     }
2268   }
2269 
2270   ciMethodData* md = nullptr;
2271   ciProfileData* data = nullptr;
2272   if (x->should_profile()) {
2273     if (x->array()->is_loaded_flat_array()) {
2274       // No need to profile a load from a flat array of known type. This can happen if
2275       // the type only became known after optimizations (for example, after the PhiSimplifier).
2276       x->set_should_profile(false);
2277     } else {
2278       int bci = x->profiled_bci();
2279       md = x->profiled_method()->method_data();
2280       assert(md != nullptr, "Sanity");
2281       data = md->bci_to_data(bci);
2282       assert(data != nullptr && data->is_ArrayLoadData(), "incorrect profiling entry");
2283       ciArrayLoadData* load_data = (ciArrayLoadData*)data;
2284       profile_array_type(x, md, load_data);
2285     }
2286   }
2287 
2288   Value element = nullptr;
2289   if (x->vt() != nullptr) {
2290     assert(x->array()->is_loaded_flat_array(), "must be");
2291     // Find the destination address (of the NewInlineTypeInstance).
2292     LIRItem obj_item(x->vt(), this);
2293 
2294     access_flat_array(true, array, index, obj_item,
2295                       x->delayed() == nullptr ? nullptr : x->delayed()->field(),
2296                       x->delayed() == nullptr ? 0 : x->delayed()->offset());
2297     set_no_result(x);
2298   } else if (x->delayed() != nullptr) {
2299     assert(x->array()->is_loaded_flat_array(), "must be");
2300     LIR_Opr result = rlock_result(x, x->delayed()->field()->type()->basic_type());
2301     access_sub_element(array, index, result, x->delayed()->field(), x->delayed()->offset());
2302   } else {
2303     LIR_Opr result = rlock_result(x, x->elt_type());
2304     LoadFlattenedArrayStub* slow_path = nullptr;
2305 
2306     if (x->should_profile() && x->array()->maybe_null_free_array()) {
2307       profile_null_free_array(array, md, data);
2308     }
2309 
2310     if (x->elt_type() == T_OBJECT && x->array()->maybe_flat_array()) {
2311       assert(x->delayed() == nullptr, "Delayed LoadIndexed only apply to loaded_flat_arrays");
2312       index.load_item();
2313       // if we are loading from a flat array, load it using a runtime call
2314       slow_path = new LoadFlattenedArrayStub(array.result(), index.result(), result, state_for(x, x->state_before()));
2315       check_flat_array(array.result(), LIR_OprFact::illegalOpr, slow_path);
2316       set_in_conditional_code(true);
2317     }
2318 
2319     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
2320     access_load_at(decorators, x->elt_type(),
2321                    array, index.result(), result,
2322                    nullptr, null_check_info);
2323 
2324     if (slow_path != nullptr) {
2325       __ branch_destination(slow_path->continuation());
2326       set_in_conditional_code(false);
2327     }
2328 
2329     element = x;
2330   }
2331 
2332   if (x->should_profile()) {
2333     profile_element_type(element, md, (ciArrayLoadData*)data);
2334   }
2335 }
2336 
2337 
2338 void LIRGenerator::do_NullCheck(NullCheck* x) {
2339   if (x->can_trap()) {
2340     LIRItem value(x->obj(), this);
2341     value.load_item();
2342     CodeEmitInfo* info = state_for(x);
2343     __ null_check(value.result(), info);
2344   }
2345 }
2346 
2347 
2348 void LIRGenerator::do_TypeCast(TypeCast* x) {
2349   LIRItem value(x->obj(), this);
2350   value.load_item();
2351   // the result is the same as from the node we are casting
2352   set_result(x, value.result());
2353 }
2354 
2355 
2356 void LIRGenerator::do_Throw(Throw* x) {
2357   LIRItem exception(x->exception(), this);
2358   exception.load_item();
2359   set_no_result(x);
2360   LIR_Opr exception_opr = exception.result();
2361   CodeEmitInfo* info = state_for(x, x->state());
2362 
2363 #ifndef PRODUCT
2364   if (PrintC1Statistics) {
2365     increment_counter(Runtime1::throw_count_address(), T_INT);
2366   }
2367 #endif
2368 
2369   // check if the instruction has an xhandler in any of the nested scopes
2370   bool unwind = false;
2371   if (info->exception_handlers()->length() == 0) {
2372     // this throw is not inside an xhandler
2373     unwind = true;
2374   } else {
2375     // get some idea of the throw type
2376     bool type_is_exact = true;
2377     ciType* throw_type = x->exception()->exact_type();
2378     if (throw_type == nullptr) {
2379       type_is_exact = false;
2380       throw_type = x->exception()->declared_type();
2381     }
2382     if (throw_type != nullptr && throw_type->is_instance_klass()) {
2383       ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
2384       unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
2385     }
2386   }
2387 
2388   // do null check before moving exception oop into fixed register
2389   // to avoid a fixed interval with an oop during the null check.
2390   // Use a copy of the CodeEmitInfo because debug information is
2391   // different for null_check and throw.
2392   if (x->exception()->as_NewInstance() == nullptr && x->exception()->as_ExceptionObject() == nullptr) {
2393     // if the exception object wasn't created using new then it might be null.
2394     __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
2395   }
2396 
2397   if (compilation()->env()->jvmti_can_post_on_exceptions()) {
2398     // we need to go through the exception lookup path to get JVMTI
2399     // notification done
2400     unwind = false;
2401   }
2402 
2403   // move exception oop into fixed register
2404   __ move(exception_opr, exceptionOopOpr());
2405 
2406   if (unwind) {
2407     __ unwind_exception(exceptionOopOpr());
2408   } else {
2409     __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
2410   }
2411 }
2412 
2413 
2414 void LIRGenerator::do_UnsafeGet(UnsafeGet* x) {
2415   BasicType type = x->basic_type();
2416   LIRItem src(x->object(), this);
2417   LIRItem off(x->offset(), this);
2418 
2419   off.load_item();
2420   src.load_item();
2421 
2422   DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS;
2423 
2424   if (x->is_volatile()) {
2425     decorators |= MO_SEQ_CST;
2426   }
2427   if (type == T_BOOLEAN) {
2428     decorators |= C1_MASK_BOOLEAN;
2429   }
2430   if (is_reference_type(type)) {
2431     decorators |= ON_UNKNOWN_OOP_REF;
2432   }
2433 
2434   LIR_Opr result = rlock_result(x, type);
2435   if (!x->is_raw()) {
2436     access_load_at(decorators, type, src, off.result(), result);
2437   } else {
2438     // Currently it is only used in GraphBuilder::setup_osr_entry_block.
2439     // It reads the value from [src + offset] directly.
2440 #ifdef _LP64
2441     LIR_Opr offset = new_register(T_LONG);
2442     __ convert(Bytecodes::_i2l, off.result(), offset);
2443 #else
2444     LIR_Opr offset = off.result();
2445 #endif
2446     LIR_Address* addr = new LIR_Address(src.result(), offset, type);
2447     if (is_reference_type(type)) {
2448       __ move_wide(addr, result);
2449     } else {
2450       __ move(addr, result);
2451     }
2452   }
2453 }
2454 
2455 
2456 void LIRGenerator::do_UnsafePut(UnsafePut* x) {
2457   BasicType type = x->basic_type();
2458   LIRItem src(x->object(), this);
2459   LIRItem off(x->offset(), this);
2460   LIRItem data(x->value(), this);
2461 
2462   src.load_item();
2463   if (type == T_BOOLEAN || type == T_BYTE) {
2464     data.load_byte_item();
2465   } else {
2466     data.load_item();
2467   }
2468   off.load_item();
2469 
2470   set_no_result(x);
2471 
2472   DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS;
2473   if (is_reference_type(type)) {
2474     decorators |= ON_UNKNOWN_OOP_REF;
2475   }
2476   if (x->is_volatile()) {
2477     decorators |= MO_SEQ_CST;
2478   }
2479   access_store_at(decorators, type, src, off.result(), data.result());
2480 }
2481 
2482 void LIRGenerator::do_UnsafeGetAndSet(UnsafeGetAndSet* x) {
2483   BasicType type = x->basic_type();
2484   LIRItem src(x->object(), this);
2485   LIRItem off(x->offset(), this);
2486   LIRItem value(x->value(), this);
2487 
2488   DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS | MO_SEQ_CST;
2489 
2490   if (is_reference_type(type)) {
2491     decorators |= ON_UNKNOWN_OOP_REF;
2492   }
2493 
2494   LIR_Opr result;
2495   if (x->is_add()) {
2496     result = access_atomic_add_at(decorators, type, src, off, value);
2497   } else {
2498     result = access_atomic_xchg_at(decorators, type, src, off, value);
2499   }
2500   set_result(x, result);
2501 }
2502 
2503 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2504   int lng = x->length();
2505 
2506   for (int i = 0; i < lng; i++) {
2507     C1SwitchRange* one_range = x->at(i);
2508     int low_key = one_range->low_key();
2509     int high_key = one_range->high_key();
2510     BlockBegin* dest = one_range->sux();
2511     if (low_key == high_key) {
2512       __ cmp(lir_cond_equal, value, low_key);
2513       __ branch(lir_cond_equal, dest);
2514     } else if (high_key - low_key == 1) {
2515       __ cmp(lir_cond_equal, value, low_key);
2516       __ branch(lir_cond_equal, dest);
2517       __ cmp(lir_cond_equal, value, high_key);
2518       __ branch(lir_cond_equal, dest);
2519     } else {
2520       LabelObj* L = new LabelObj();
2521       __ cmp(lir_cond_less, value, low_key);
2522       __ branch(lir_cond_less, L->label());
2523       __ cmp(lir_cond_lessEqual, value, high_key);
2524       __ branch(lir_cond_lessEqual, dest);
2525       __ branch_destination(L->label());
2526     }
2527   }
2528   __ jump(default_sux);
2529 }
2530 
2531 
2532 SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
2533   SwitchRangeList* res = new SwitchRangeList();
2534   int len = x->length();
2535   if (len > 0) {
2536     BlockBegin* sux = x->sux_at(0);
2537     int low = x->lo_key();
2538     BlockBegin* default_sux = x->default_sux();
2539     C1SwitchRange* range = new C1SwitchRange(low, sux);
2540     for (int i = 0; i < len; i++) {
2541       int key = low + i;
2542       BlockBegin* new_sux = x->sux_at(i);
2543       if (sux == new_sux) {
2544         // still in same range
2545         range->set_high_key(key);
2546       } else {
2547         // skip tests which explicitly dispatch to the default
2548         if (sux != default_sux) {
2549           res->append(range);
2550         }
2551         range = new C1SwitchRange(key, new_sux);
2552       }
2553       sux = new_sux;
2554     }
2555     if (res->length() == 0 || res->last() != range)  res->append(range);
2556   }
2557   return res;
2558 }
2559 
2560 
2561 // we expect the keys to be sorted by increasing value
2562 SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2563   SwitchRangeList* res = new SwitchRangeList();
2564   int len = x->length();
2565   if (len > 0) {
2566     BlockBegin* default_sux = x->default_sux();
2567     int key = x->key_at(0);
2568     BlockBegin* sux = x->sux_at(0);
2569     C1SwitchRange* range = new C1SwitchRange(key, sux);
2570     for (int i = 1; i < len; i++) {
2571       int new_key = x->key_at(i);
2572       BlockBegin* new_sux = x->sux_at(i);
2573       if (key+1 == new_key && sux == new_sux) {
2574         // still in same range
2575         range->set_high_key(new_key);
2576       } else {
2577         // skip tests which explicitly dispatch to the default
2578         if (range->sux() != default_sux) {
2579           res->append(range);
2580         }
2581         range = new C1SwitchRange(new_key, new_sux);
2582       }
2583       key = new_key;
2584       sux = new_sux;
2585     }
2586     if (res->length() == 0 || res->last() != range)  res->append(range);
2587   }
2588   return res;
2589 }
2590 
2591 
2592 void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2593   LIRItem tag(x->tag(), this);
2594   tag.load_item();
2595   set_no_result(x);
2596 
2597   if (x->is_safepoint()) {
2598     __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2599   }
2600 
2601   // move values into phi locations
2602   move_to_phi(x->state());
2603 
2604   int lo_key = x->lo_key();
2605   int len = x->length();
2606   assert(lo_key <= (lo_key + (len - 1)), "integer overflow");
2607   LIR_Opr value = tag.result();
2608 
2609   if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) {
2610     ciMethod* method = x->state()->scope()->method();
2611     ciMethodData* md = method->method_data_or_null();
2612     assert(md != nullptr, "Sanity");
2613     ciProfileData* data = md->bci_to_data(x->state()->bci());
2614     assert(data != nullptr, "must have profiling data");
2615     assert(data->is_MultiBranchData(), "bad profile data?");
2616     int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset());
2617     LIR_Opr md_reg = new_register(T_METADATA);
2618     __ metadata2reg(md->constant_encoding(), md_reg);
2619     LIR_Opr data_offset_reg = new_pointer_register();
2620     LIR_Opr tmp_reg = new_pointer_register();
2621 
2622     __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg);
2623     for (int i = 0; i < len; i++) {
2624       int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i));
2625       __ cmp(lir_cond_equal, value, i + lo_key);
2626       __ move(data_offset_reg, tmp_reg);
2627       __ cmove(lir_cond_equal,
2628                LIR_OprFact::intptrConst(count_offset),
2629                tmp_reg,
2630                data_offset_reg, T_INT);
2631     }
2632 
2633     LIR_Opr data_reg = new_pointer_register();
2634     LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
2635     __ move(data_addr, data_reg);
2636     __ add(data_reg, LIR_OprFact::intptrConst(1), data_reg);
2637     __ move(data_reg, data_addr);
2638   }
2639 
2640   if (UseTableRanges) {
2641     do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2642   } else {
2643     for (int i = 0; i < len; i++) {
2644       __ cmp(lir_cond_equal, value, i + lo_key);
2645       __ branch(lir_cond_equal, x->sux_at(i));
2646     }
2647     __ jump(x->default_sux());
2648   }
2649 }
2650 
2651 
2652 void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2653   LIRItem tag(x->tag(), this);
2654   tag.load_item();
2655   set_no_result(x);
2656 
2657   if (x->is_safepoint()) {
2658     __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2659   }
2660 
2661   // move values into phi locations
2662   move_to_phi(x->state());
2663 
2664   LIR_Opr value = tag.result();
2665   int len = x->length();
2666 
2667   if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) {
2668     ciMethod* method = x->state()->scope()->method();
2669     ciMethodData* md = method->method_data_or_null();
2670     assert(md != nullptr, "Sanity");
2671     ciProfileData* data = md->bci_to_data(x->state()->bci());
2672     assert(data != nullptr, "must have profiling data");
2673     assert(data->is_MultiBranchData(), "bad profile data?");
2674     int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset());
2675     LIR_Opr md_reg = new_register(T_METADATA);
2676     __ metadata2reg(md->constant_encoding(), md_reg);
2677     LIR_Opr data_offset_reg = new_pointer_register();
2678     LIR_Opr tmp_reg = new_pointer_register();
2679 
2680     __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg);
2681     for (int i = 0; i < len; i++) {
2682       int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i));
2683       __ cmp(lir_cond_equal, value, x->key_at(i));
2684       __ move(data_offset_reg, tmp_reg);
2685       __ cmove(lir_cond_equal,
2686                LIR_OprFact::intptrConst(count_offset),
2687                tmp_reg,
2688                data_offset_reg, T_INT);
2689     }
2690 
2691     LIR_Opr data_reg = new_pointer_register();
2692     LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
2693     __ move(data_addr, data_reg);
2694     __ add(data_reg, LIR_OprFact::intptrConst(1), data_reg);
2695     __ move(data_reg, data_addr);
2696   }
2697 
2698   if (UseTableRanges) {
2699     do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2700   } else {
2701     int len = x->length();
2702     for (int i = 0; i < len; i++) {
2703       __ cmp(lir_cond_equal, value, x->key_at(i));
2704       __ branch(lir_cond_equal, x->sux_at(i));
2705     }
2706     __ jump(x->default_sux());
2707   }
2708 }
2709 
2710 
2711 void LIRGenerator::do_Goto(Goto* x) {
2712   set_no_result(x);
2713 
2714   if (block()->next()->as_OsrEntry()) {
2715     // need to free up storage used for OSR entry point
2716     LIR_Opr osrBuffer = block()->next()->operand();
2717     BasicTypeList signature;
2718     signature.append(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); // pass a pointer to osrBuffer
2719     CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2720     __ move(osrBuffer, cc->args()->at(0));
2721     __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
2722                          getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2723   }
2724 
2725   if (x->is_safepoint()) {
2726     ValueStack* state = x->state_before() ? x->state_before() : x->state();
2727 
2728     // increment backedge counter if needed
2729     CodeEmitInfo* info = state_for(x, state);
2730     increment_backedge_counter(info, x->profiled_bci());
2731     CodeEmitInfo* safepoint_info = state_for(x, state);
2732     __ safepoint(safepoint_poll_register(), safepoint_info);
2733   }
2734 
2735   // Gotos can be folded Ifs, handle this case.
2736   if (x->should_profile()) {
2737     ciMethod* method = x->profiled_method();
2738     assert(method != nullptr, "method should be set if branch is profiled");
2739     ciMethodData* md = method->method_data_or_null();
2740     assert(md != nullptr, "Sanity");
2741     ciProfileData* data = md->bci_to_data(x->profiled_bci());
2742     assert(data != nullptr, "must have profiling data");
2743     int offset;
2744     if (x->direction() == Goto::taken) {
2745       assert(data->is_BranchData(), "need BranchData for two-way branches");
2746       offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
2747     } else if (x->direction() == Goto::not_taken) {
2748       assert(data->is_BranchData(), "need BranchData for two-way branches");
2749       offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
2750     } else {
2751       assert(data->is_JumpData(), "need JumpData for branches");
2752       offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
2753     }
2754     LIR_Opr md_reg = new_register(T_METADATA);
2755     __ metadata2reg(md->constant_encoding(), md_reg);
2756 
2757     increment_counter(new LIR_Address(md_reg, offset,
2758                                       NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
2759   }
2760 
2761   // emit phi-instruction move after safepoint since this simplifies
2762   // describing the state as the safepoint.
2763   move_to_phi(x->state());
2764 
2765   __ jump(x->default_sux());
2766 }
2767 
2768 /**
2769  * Emit profiling code if needed for arguments, parameters, return value types
2770  *
2771  * @param md                    MDO the code will update at runtime
2772  * @param md_base_offset        common offset in the MDO for this profile and subsequent ones
2773  * @param md_offset             offset in the MDO (on top of md_base_offset) for this profile
2774  * @param profiled_k            current profile
2775  * @param obj                   IR node for the object to be profiled
2776  * @param mdp                   register to hold the pointer inside the MDO (md + md_base_offset).
2777  *                              Set once we find an update to make and use for next ones.
2778  * @param not_null              true if we know obj cannot be null
2779  * @param signature_at_call_k   signature at call for obj
2780  * @param callee_signature_k    signature of callee for obj
2781  *                              at call and callee signatures differ at method handle call
2782  * @return                      the only klass we know will ever be seen at this profile point
2783  */
2784 ciKlass* LIRGenerator::profile_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k,
2785                                     Value obj, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
2786                                     ciKlass* callee_signature_k) {
2787   ciKlass* result = nullptr;
2788   bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
2789   bool do_update = !TypeEntries::is_type_unknown(profiled_k);
2790   // known not to be null or null bit already set and already set to
2791   // unknown: nothing we can do to improve profiling
2792   if (!do_null && !do_update) {
2793     return result;
2794   }
2795 
2796   ciKlass* exact_klass = nullptr;
2797   Compilation* comp = Compilation::current();
2798   if (do_update) {
2799     // try to find exact type, using CHA if possible, so that loading
2800     // the klass from the object can be avoided
2801     ciType* type = obj->exact_type();
2802     if (type == nullptr) {
2803       type = obj->declared_type();
2804       type = comp->cha_exact_type(type);
2805     }
2806     assert(type == nullptr || type->is_klass(), "type should be class");
2807     exact_klass = (type != nullptr && type->is_loaded()) ? (ciKlass*)type : nullptr;
2808 
2809     do_update = exact_klass == nullptr || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2810   }
2811 
2812   if (!do_null && !do_update) {
2813     return result;
2814   }
2815 
2816   ciKlass* exact_signature_k = nullptr;
2817   if (do_update && signature_at_call_k != nullptr) {
2818     // Is the type from the signature exact (the only one possible)?
2819     exact_signature_k = signature_at_call_k->exact_klass();
2820     if (exact_signature_k == nullptr) {
2821       exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2822     } else {
2823       result = exact_signature_k;
2824       // Known statically. No need to emit any code: prevent
2825       // LIR_Assembler::emit_profile_type() from emitting useless code
2826       profiled_k = ciTypeEntries::with_status(result, profiled_k);
2827     }
2828     // exact_klass and exact_signature_k can be both non null but
2829     // different if exact_klass is loaded after the ciObject for
2830     // exact_signature_k is created.
2831     if (exact_klass == nullptr && exact_signature_k != nullptr && exact_klass != exact_signature_k) {
2832       // sometimes the type of the signature is better than the best type
2833       // the compiler has
2834       exact_klass = exact_signature_k;
2835     }
2836     if (callee_signature_k != nullptr &&
2837         callee_signature_k != signature_at_call_k) {
2838       ciKlass* improved_klass = callee_signature_k->exact_klass();
2839       if (improved_klass == nullptr) {
2840         improved_klass = comp->cha_exact_type(callee_signature_k);
2841       }
2842       if (exact_klass == nullptr && improved_klass != nullptr && exact_klass != improved_klass) {
2843         exact_klass = exact_signature_k;
2844       }
2845     }
2846     do_update = exact_klass == nullptr || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2847   }
2848 
2849   if (exact_klass != nullptr && exact_klass->is_obj_array_klass()) {
2850     if (exact_klass->can_be_inline_array_klass()) {
2851       // Inline type arrays can have additional properties, we need to load the klass
2852       // TODO 8350865 Can we do better here and track the properties?
2853       exact_klass = nullptr;
2854       do_update = true;
2855     } else {
2856       // For a direct pointer comparison, we need the refined array klass pointer
2857       exact_klass = ciObjArrayKlass::make(exact_klass->as_array_klass()->element_klass());
2858       do_update = ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2859     }
2860   }
2861   if (!do_null && !do_update) {
2862     return result;
2863   }
2864 
2865   if (mdp == LIR_OprFact::illegalOpr) {
2866     mdp = new_register(T_METADATA);
2867     __ metadata2reg(md->constant_encoding(), mdp);
2868     if (md_base_offset != 0) {
2869       LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
2870       mdp = new_pointer_register();
2871       __ leal(LIR_OprFact::address(base_type_address), mdp);
2872     }
2873   }
2874   LIRItem value(obj, this);
2875   value.load_item();
2876   __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
2877                   value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != nullptr);
2878   return result;
2879 }
2880 
2881 // profile parameters on entry to the root of the compilation
2882 void LIRGenerator::profile_parameters(Base* x) {
2883   if (compilation()->profile_parameters()) {
2884     CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2885     ciMethodData* md = scope()->method()->method_data_or_null();
2886     assert(md != nullptr, "Sanity");
2887 
2888     if (md->parameters_type_data() != nullptr) {
2889       ciParametersTypeData* parameters_type_data = md->parameters_type_data();
2890       ciTypeStackSlotEntries* parameters =  parameters_type_data->parameters();
2891       LIR_Opr mdp = LIR_OprFact::illegalOpr;
2892       for (int java_index = 0, i = 0, j = 0; j < parameters_type_data->number_of_parameters(); i++) {
2893         LIR_Opr src = args->at(i);
2894         assert(!src->is_illegal(), "check");
2895         BasicType t = src->type();
2896         if (is_reference_type(t)) {
2897           intptr_t profiled_k = parameters->type(j);
2898           Local* local = x->state()->local_at(java_index)->as_Local();
2899           ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
2900                                         in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
2901                                         profiled_k, local, mdp, false, local->declared_type()->as_klass(), nullptr);
2902           // If the profile is known statically set it once for all and do not emit any code
2903           if (exact != nullptr) {
2904             md->set_parameter_type(j, exact);
2905           }
2906           j++;
2907         }
2908         java_index += type2size[t];
2909       }
2910     }
2911   }
2912 }
2913 
2914 void LIRGenerator::profile_flags(ciMethodData* md, ciProfileData* data, int flag, LIR_Condition condition) {
2915   assert(md != nullptr && data != nullptr, "should have been initialized");
2916   LIR_Opr mdp = new_register(T_METADATA);
2917   __ metadata2reg(md->constant_encoding(), mdp);
2918   LIR_Address* addr = new LIR_Address(mdp, md->byte_offset_of_slot(data, DataLayout::flags_offset()), T_BYTE);
2919   LIR_Opr flags = new_register(T_INT);
2920   __ move(addr, flags);
2921   LIR_Opr update;
2922   if (condition != lir_cond_always) {
2923     update = new_register(T_INT);
2924     __ cmove(condition, LIR_OprFact::intConst(0), LIR_OprFact::intConst(flag), update, T_INT);
2925   } else {
2926     update = LIR_OprFact::intConst(flag);
2927   }
2928   __ logical_or(flags, update, flags);
2929   __ store(flags, addr);
2930 }
2931 
2932 void LIRGenerator::profile_null_free_array(LIRItem array, ciMethodData* md, ciProfileData* data) {
2933   assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
2934   LabelObj* L_end = new LabelObj();
2935   LIR_Opr tmp = new_register(T_METADATA);
2936   __ check_null_free_array(array.result(), tmp);
2937 
2938   profile_flags(md, data, ArrayStoreData::null_free_array_byte_constant(), lir_cond_equal);
2939 }
2940 
2941 template <class ArrayData> void LIRGenerator::profile_array_type(AccessIndexed* x, ciMethodData*& md, ArrayData*& load_store) {
2942   assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
2943   LIR_Opr mdp = LIR_OprFact::illegalOpr;
2944   profile_type(md, md->byte_offset_of_slot(load_store, ArrayData::array_offset()), 0,
2945                load_store->array()->type(), x->array(), mdp, true, nullptr, nullptr);
2946 }
2947 
2948 void LIRGenerator::profile_element_type(Value element, ciMethodData* md, ciArrayLoadData* load_data) {
2949   assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
2950   assert(md != nullptr && load_data != nullptr, "should have been initialized");
2951   LIR_Opr mdp = LIR_OprFact::illegalOpr;
2952   profile_type(md, md->byte_offset_of_slot(load_data, ArrayLoadData::element_offset()), 0,
2953                load_data->element()->type(), element, mdp, false, nullptr, nullptr);
2954 }
2955 
2956 void LIRGenerator::do_Base(Base* x) {
2957   __ std_entry(LIR_OprFact::illegalOpr);
2958   // Emit moves from physical registers / stack slots to virtual registers
2959   CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2960   IRScope* irScope = compilation()->hir()->top_scope();
2961   int java_index = 0;
2962   for (int i = 0; i < args->length(); i++) {
2963     LIR_Opr src = args->at(i);
2964     assert(!src->is_illegal(), "check");
2965     BasicType t = src->type();
2966 
2967     // Types which are smaller than int are passed as int, so
2968     // correct the type which passed.
2969     switch (t) {
2970     case T_BYTE:
2971     case T_BOOLEAN:
2972     case T_SHORT:
2973     case T_CHAR:
2974       t = T_INT;
2975       break;
2976     default:
2977       break;
2978     }
2979 
2980     LIR_Opr dest = new_register(t);
2981     __ move(src, dest);
2982 
2983     // Assign new location to Local instruction for this local
2984     Local* local = x->state()->local_at(java_index)->as_Local();
2985     assert(local != nullptr, "Locals for incoming arguments must have been created");
2986 #ifndef __SOFTFP__
2987     // The java calling convention passes double as long and float as int.
2988     assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
2989 #endif // __SOFTFP__
2990     local->set_operand(dest);
2991 #ifdef ASSERT
2992     _instruction_for_operand.at_put_grow(dest->vreg_number(), local, nullptr);
2993 #endif
2994     java_index += type2size[t];
2995   }
2996 
2997   // Check if we need a membar at the beginning of the java.lang.Object
2998   // constructor to satisfy the memory model for strict fields.
2999   if (Arguments::is_valhalla_enabled() && method()->intrinsic_id() == vmIntrinsics::_Object_init) {
3000     __ membar_storestore();
3001   }
3002 
3003   if (compilation()->env()->dtrace_method_probes()) {
3004     BasicTypeList signature;
3005     signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
3006     signature.append(T_METADATA); // Method*
3007     LIR_OprList* args = new LIR_OprList();
3008     args->append(getThreadPointer());
3009     LIR_Opr meth = new_register(T_METADATA);
3010     __ metadata2reg(method()->constant_encoding(), meth);
3011     args->append(meth);
3012     call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, nullptr);
3013   }
3014 
3015   if (method()->is_synchronized()) {
3016     LIR_Opr obj;
3017     if (method()->is_static()) {
3018       obj = new_register(T_OBJECT);
3019       __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
3020     } else {
3021       Local* receiver = x->state()->local_at(0)->as_Local();
3022       assert(receiver != nullptr, "must already exist");
3023       obj = receiver->operand();
3024     }
3025     assert(obj->is_valid(), "must be valid");
3026 
3027     if (method()->is_synchronized()) {
3028       LIR_Opr lock = syncLockOpr();
3029       __ load_stack_address_monitor(0, lock);
3030 
3031       CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, x->check_flag(Instruction::DeoptimizeOnException));
3032       CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
3033 
3034       // receiver is guaranteed non-null so don't need CodeEmitInfo
3035       __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, nullptr);
3036     }
3037   }
3038   // increment invocation counters if needed
3039   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
3040     profile_parameters(x);
3041     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, false);
3042     increment_invocation_counter(info);
3043   }
3044   if (method()->has_scalarized_args()) {
3045     // Check if deoptimization was triggered (i.e. orig_pc was set) while buffering scalarized inline type arguments
3046     // in the entry point (see comments in frame::deoptimize). If so, deoptimize only now that we have the right state.
3047     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), nullptr, false);
3048     CodeStub* deopt_stub = new DeoptimizeStub(info, Deoptimization::Reason_none, Deoptimization::Action_none);
3049     __ append(new LIR_Op0(lir_check_orig_pc));
3050     __ branch(lir_cond_notEqual, deopt_stub);
3051   }
3052 
3053   // all blocks with a successor must end with an unconditional jump
3054   // to the successor even if they are consecutive
3055   __ jump(x->default_sux());
3056 }
3057 
3058 
3059 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
3060   // construct our frame and model the production of incoming pointer
3061   // to the OSR buffer.
3062   __ osr_entry(LIR_Assembler::osrBufferPointer());
3063   LIR_Opr result = rlock_result(x);
3064   __ move(LIR_Assembler::osrBufferPointer(), result);
3065 }
3066 
3067 void LIRGenerator::invoke_load_one_argument(LIRItem* param, LIR_Opr loc) {
3068   if (loc->is_register()) {
3069     param->load_item_force(loc);
3070   } else {
3071     LIR_Address* addr = loc->as_address_ptr();
3072     param->load_for_store(addr->type());
3073     if (addr->type() == T_OBJECT) {
3074       __ move_wide(param->result(), addr);
3075     } else {
3076       __ move(param->result(), addr);
3077     }
3078   }
3079 }
3080 
3081 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
3082   assert(args->length() == arg_list->length(),
3083          "args=%d, arg_list=%d", args->length(), arg_list->length());
3084   for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
3085     LIRItem* param = args->at(i);
3086     LIR_Opr loc = arg_list->at(i);
3087     invoke_load_one_argument(param, loc);
3088   }
3089 
3090   if (x->has_receiver()) {
3091     LIRItem* receiver = args->at(0);
3092     LIR_Opr loc = arg_list->at(0);
3093     if (loc->is_register()) {
3094       receiver->load_item_force(loc);
3095     } else {
3096       assert(loc->is_address(), "just checking");
3097       receiver->load_for_store(T_OBJECT);
3098       __ move_wide(receiver->result(), loc->as_address_ptr());
3099     }
3100   }
3101 }
3102 
3103 
3104 // Visits all arguments, returns appropriate items without loading them
3105 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
3106   LIRItemList* argument_items = new LIRItemList();
3107   if (x->has_receiver()) {
3108     LIRItem* receiver = new LIRItem(x->receiver(), this);
3109     argument_items->append(receiver);
3110   }
3111   for (int i = 0; i < x->number_of_arguments(); i++) {
3112     LIRItem* param = new LIRItem(x->argument_at(i), this);
3113     argument_items->append(param);
3114   }
3115   return argument_items;
3116 }
3117 
3118 
3119 // The invoke with receiver has following phases:
3120 //   a) traverse and load/lock receiver;
3121 //   b) traverse all arguments -> item-array (invoke_visit_argument)
3122 //   c) push receiver on stack
3123 //   d) load each of the items and push on stack
3124 //   e) unlock receiver
3125 //   f) move receiver into receiver-register %o0
3126 //   g) lock result registers and emit call operation
3127 //
3128 // Before issuing a call, we must spill-save all values on stack
3129 // that are in caller-save register. "spill-save" moves those registers
3130 // either in a free callee-save register or spills them if no free
3131 // callee save register is available.
3132 //
3133 // The problem is where to invoke spill-save.
3134 // - if invoked between e) and f), we may lock callee save
3135 //   register in "spill-save" that destroys the receiver register
3136 //   before f) is executed
3137 // - if we rearrange f) to be earlier (by loading %o0) it
3138 //   may destroy a value on the stack that is currently in %o0
3139 //   and is waiting to be spilled
3140 // - if we keep the receiver locked while doing spill-save,
3141 //   we cannot spill it as it is spill-locked
3142 //
3143 void LIRGenerator::do_Invoke(Invoke* x) {
3144   CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
3145 
3146   LIR_OprList* arg_list = cc->args();
3147   LIRItemList* args = invoke_visit_arguments(x);
3148   LIR_Opr receiver = LIR_OprFact::illegalOpr;
3149 
3150   // setup result register
3151   LIR_Opr result_register = LIR_OprFact::illegalOpr;
3152   if (x->type() != voidType) {
3153     result_register = result_register_for(x->type());
3154   }
3155 
3156   CodeEmitInfo* info = state_for(x, x->state());
3157 
3158   invoke_load_arguments(x, args, arg_list);
3159 
3160   if (x->has_receiver()) {
3161     args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
3162     receiver = args->at(0)->result();
3163   }
3164 
3165   // emit invoke code
3166   assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
3167 
3168   ciMethod* target = x->target();
3169   switch (x->code()) {
3170     case Bytecodes::_invokestatic:
3171       __ call_static(target, result_register,
3172                      SharedRuntime::get_resolve_static_call_stub(),
3173                      arg_list, info);
3174       break;
3175     case Bytecodes::_invokespecial:
3176     case Bytecodes::_invokevirtual:
3177     case Bytecodes::_invokeinterface:
3178       // for loaded and final (method or class) target we still produce an inline cache,
3179       // in order to be able to call mixed mode
3180       if (x->code() == Bytecodes::_invokespecial || x->target_is_final()) {
3181         __ call_opt_virtual(target, receiver, result_register,
3182                             SharedRuntime::get_resolve_opt_virtual_call_stub(),
3183                             arg_list, info);
3184       } else {
3185         __ call_icvirtual(target, receiver, result_register,
3186                           SharedRuntime::get_resolve_virtual_call_stub(),
3187                           arg_list, info);
3188       }
3189       break;
3190     case Bytecodes::_invokedynamic: {
3191       __ call_dynamic(target, receiver, result_register,
3192                       SharedRuntime::get_resolve_static_call_stub(),
3193                       arg_list, info);
3194       break;
3195     }
3196     default:
3197       fatal("unexpected bytecode: %s", Bytecodes::name(x->code()));
3198       break;
3199   }
3200 
3201   if (result_register->is_valid()) {
3202     LIR_Opr result = rlock_result(x);
3203     __ move(result_register, result);
3204   }
3205 }
3206 
3207 
3208 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
3209   assert(x->number_of_arguments() == 1, "wrong type");
3210   LIRItem value       (x->argument_at(0), this);
3211   LIR_Opr reg = rlock_result(x);
3212   value.load_item();
3213   LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
3214   __ move(tmp, reg);
3215 }
3216 
3217 
3218 
3219 // Code for  :  x->x() {x->cond()} x->y() ? x->tval() : x->fval()
3220 void LIRGenerator::do_IfOp(IfOp* x) {
3221 #ifdef ASSERT
3222   {
3223     ValueTag xtag = x->x()->type()->tag();
3224     ValueTag ttag = x->tval()->type()->tag();
3225     assert(xtag == intTag || xtag == objectTag, "cannot handle others");
3226     assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
3227     assert(ttag == x->fval()->type()->tag(), "cannot handle others");
3228   }
3229 #endif
3230 
3231   LIRItem left(x->x(), this);
3232   LIRItem right(x->y(), this);
3233   left.load_item();
3234   if (can_inline_as_constant(right.value()) && !x->substitutability_check()) {
3235     right.dont_load_item();
3236   } else {
3237     // substitutability_check() needs to use right as a base register.
3238     right.load_item();
3239   }
3240 
3241   LIRItem t_val(x->tval(), this);
3242   LIRItem f_val(x->fval(), this);
3243   t_val.dont_load_item();
3244   f_val.dont_load_item();
3245 
3246   if (x->substitutability_check()) {
3247     substitutability_check(x, left, right, t_val, f_val);
3248   } else {
3249     LIR_Opr reg = rlock_result(x);
3250     __ cmp(lir_cond(x->cond()), left.result(), right.result());
3251     __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
3252   }
3253 }
3254 
3255 void LIRGenerator::substitutability_check(IfOp* x, LIRItem& left, LIRItem& right, LIRItem& t_val, LIRItem& f_val) {
3256   assert(x->cond() == If::eql || x->cond() == If::neq, "must be");
3257   bool is_acmpeq = (x->cond() == If::eql);
3258   LIR_Opr equal_result     = is_acmpeq ? t_val.result() : f_val.result();
3259   LIR_Opr not_equal_result = is_acmpeq ? f_val.result() : t_val.result();
3260   LIR_Opr result = rlock_result(x);
3261   CodeEmitInfo* info = state_for(x, x->state_before());
3262 
3263   substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info);
3264 }
3265 
3266 void LIRGenerator::substitutability_check(If* x, LIRItem& left, LIRItem& right) {
3267   LIR_Opr equal_result     = LIR_OprFact::intConst(1);
3268   LIR_Opr not_equal_result = LIR_OprFact::intConst(0);
3269   LIR_Opr result = new_register(T_INT);
3270   CodeEmitInfo* info = state_for(x, x->state_before());
3271 
3272   substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info);
3273 
3274   assert(x->cond() == If::eql || x->cond() == If::neq, "must be");
3275   __ cmp(lir_cond(x->cond()), result, equal_result);
3276 }
3277 
3278 void LIRGenerator::substitutability_check_common(Value left_val, Value right_val, LIRItem& left, LIRItem& right,
3279                                                  LIR_Opr equal_result, LIR_Opr not_equal_result, LIR_Opr result,
3280                                                  CodeEmitInfo* info) {
3281   LIR_Opr tmp1 = LIR_OprFact::illegalOpr;
3282   LIR_Opr tmp2 = LIR_OprFact::illegalOpr;
3283 
3284   ciKlass* left_klass = left_val->as_loaded_klass_or_null();
3285   ciKlass* right_klass = right_val->as_loaded_klass_or_null();
3286   if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) {
3287     // No need to load klass -- the operands are statically known to be the same inline klass.
3288   } else {
3289     BasicType t_klass = UseCompressedOops ? T_INT : T_METADATA;
3290     tmp1 = new_register(t_klass);
3291     tmp2 = new_register(t_klass);
3292   }
3293 
3294   CodeStub* slow_path = new SubstitutabilityCheckStub(left.result(), right.result(), info);
3295   __ substitutability_check(result, left.result(), right.result(), equal_result, not_equal_result,
3296                             left_klass, right_klass, tmp1, tmp2, info, slow_path);
3297 }
3298 
3299 void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {
3300   assert(x->number_of_arguments() == 0, "wrong type");
3301   // Enforce computation of _reserved_argument_area_size which is required on some platforms.
3302   BasicTypeList signature;
3303   CallingConvention* cc = frame_map()->c_calling_convention(&signature);
3304   LIR_Opr reg = result_register_for(x->type());
3305   __ call_runtime_leaf(routine, getThreadTemp(),
3306                        reg, new LIR_OprList());
3307   LIR_Opr result = rlock_result(x);
3308   __ move(reg, result);
3309 }
3310 
3311 
3312 
3313 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
3314   switch (x->id()) {
3315   case vmIntrinsics::_intBitsToFloat      :
3316   case vmIntrinsics::_doubleToRawLongBits :
3317   case vmIntrinsics::_longBitsToDouble    :
3318   case vmIntrinsics::_floatToRawIntBits   : {
3319     do_FPIntrinsics(x);
3320     break;
3321   }
3322 
3323 #ifdef JFR_HAVE_INTRINSICS
3324   case vmIntrinsics::_counterTime:
3325     do_RuntimeCall(CAST_FROM_FN_PTR(address, JfrTime::time_function()), x);
3326     break;
3327 #endif
3328 
3329   case vmIntrinsics::_currentTimeMillis:
3330     do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), x);
3331     break;
3332 
3333   case vmIntrinsics::_nanoTime:
3334     do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), x);
3335     break;
3336 
3337   case vmIntrinsics::_Object_init:    do_RegisterFinalizer(x); break;
3338   case vmIntrinsics::_isInstance:     do_isInstance(x);    break;
3339   case vmIntrinsics::_getClass:       do_getClass(x);      break;
3340   case vmIntrinsics::_getObjectSize:  do_getObjectSize(x); break;
3341   case vmIntrinsics::_currentCarrierThread: do_currentCarrierThread(x); break;
3342   case vmIntrinsics::_currentThread:  do_vthread(x);       break;
3343   case vmIntrinsics::_scopedValueCache: do_scopedValueCache(x); break;
3344 
3345   case vmIntrinsics::_dlog:           // fall through
3346   case vmIntrinsics::_dlog10:         // fall through
3347   case vmIntrinsics::_dabs:           // fall through
3348   case vmIntrinsics::_dsqrt:          // fall through
3349   case vmIntrinsics::_dsqrt_strict:   // fall through
3350   case vmIntrinsics::_dtan:           // fall through
3351   case vmIntrinsics::_dsinh:          // fall through
3352   case vmIntrinsics::_dtanh:          // fall through
3353   case vmIntrinsics::_dsin :          // fall through
3354   case vmIntrinsics::_dcos :          // fall through
3355   case vmIntrinsics::_dcbrt :         // fall through
3356   case vmIntrinsics::_dexp :          // fall through
3357   case vmIntrinsics::_dpow :          do_MathIntrinsic(x); break;
3358   case vmIntrinsics::_arraycopy:      do_ArrayCopy(x);     break;
3359 
3360   case vmIntrinsics::_fmaD:           do_FmaIntrinsic(x); break;
3361   case vmIntrinsics::_fmaF:           do_FmaIntrinsic(x); break;
3362 
3363   // Use java.lang.Math intrinsics code since it works for these intrinsics too.
3364   case vmIntrinsics::_floatToFloat16: // fall through
3365   case vmIntrinsics::_float16ToFloat: do_MathIntrinsic(x); break;
3366 
3367   case vmIntrinsics::_Preconditions_checkIndex:
3368     do_PreconditionsCheckIndex(x, T_INT);
3369     break;
3370   case vmIntrinsics::_Preconditions_checkLongIndex:
3371     do_PreconditionsCheckIndex(x, T_LONG);
3372     break;
3373 
3374   case vmIntrinsics::_compareAndSetReference:
3375     do_CompareAndSwap(x, objectType);
3376     break;
3377   case vmIntrinsics::_compareAndSetInt:
3378     do_CompareAndSwap(x, intType);
3379     break;
3380   case vmIntrinsics::_compareAndSetLong:
3381     do_CompareAndSwap(x, longType);
3382     break;
3383 
3384   case vmIntrinsics::_loadFence :
3385     __ membar_acquire();
3386     break;
3387   case vmIntrinsics::_storeFence:
3388     __ membar_release();
3389     break;
3390   case vmIntrinsics::_storeStoreFence:
3391     __ membar_storestore();
3392     break;
3393   case vmIntrinsics::_fullFence :
3394     __ membar();
3395     break;
3396   case vmIntrinsics::_onSpinWait:
3397     __ on_spin_wait();
3398     break;
3399   case vmIntrinsics::_Reference_get0:
3400     do_Reference_get0(x);
3401     break;
3402 
3403   case vmIntrinsics::_updateCRC32:
3404   case vmIntrinsics::_updateBytesCRC32:
3405   case vmIntrinsics::_updateByteBufferCRC32:
3406     do_update_CRC32(x);
3407     break;
3408 
3409   case vmIntrinsics::_updateBytesCRC32C:
3410   case vmIntrinsics::_updateDirectByteBufferCRC32C:
3411     do_update_CRC32C(x);
3412     break;
3413 
3414   case vmIntrinsics::_vectorizedMismatch:
3415     do_vectorizedMismatch(x);
3416     break;
3417 
3418   case vmIntrinsics::_blackhole:
3419     do_blackhole(x);
3420     break;
3421 
3422   default: ShouldNotReachHere(); break;
3423   }
3424 }
3425 
3426 void LIRGenerator::profile_arguments(ProfileCall* x) {
3427   if (compilation()->profile_arguments()) {
3428     int bci = x->bci_of_invoke();
3429     ciMethodData* md = x->method()->method_data_or_null();
3430     assert(md != nullptr, "Sanity");
3431     ciProfileData* data = md->bci_to_data(bci);
3432     if (data != nullptr) {
3433       if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) ||
3434           (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) {
3435         ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
3436         int base_offset = md->byte_offset_of_slot(data, extra);
3437         LIR_Opr mdp = LIR_OprFact::illegalOpr;
3438         ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();
3439 
3440         Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3441         int start = 0;
3442         int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
3443         if (x->callee()->is_loaded() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
3444           // first argument is not profiled at call (method handle invoke)
3445           assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected");
3446           start = 1;
3447         }
3448         ciSignature* callee_signature = x->callee()->signature();
3449         // method handle call to virtual method
3450         bool has_receiver = x->callee()->is_loaded() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
3451         ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : nullptr);
3452 
3453         bool ignored_will_link;
3454         ciSignature* signature_at_call = nullptr;
3455         x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3456         ciSignatureStream signature_at_call_stream(signature_at_call);
3457 
3458         // if called through method handle invoke, some arguments may have been popped
3459         for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) {
3460           int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
3461           ciKlass* exact = profile_type(md, base_offset, off,
3462               args->type(i), x->profiled_arg_at(i+start), mdp,
3463               !x->arg_needs_null_check(i+start),
3464               signature_at_call_stream.next_klass(), callee_signature_stream.next_klass());
3465           if (exact != nullptr) {
3466             md->set_argument_type(bci, i, exact);
3467           }
3468         }
3469       } else {
3470 #ifdef ASSERT
3471         Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke());
3472         int n = x->nb_profiled_args();
3473         assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() ||
3474             (x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))),
3475             "only at JSR292 bytecodes");
3476 #endif
3477       }
3478     }
3479   }
3480 }
3481 
3482 // profile parameters on entry to an inlined method
3483 void LIRGenerator::profile_parameters_at_call(ProfileCall* x) {
3484   if (compilation()->profile_parameters() && x->inlined()) {
3485     ciMethodData* md = x->callee()->method_data_or_null();
3486     if (md != nullptr) {
3487       ciParametersTypeData* parameters_type_data = md->parameters_type_data();
3488       if (parameters_type_data != nullptr) {
3489         ciTypeStackSlotEntries* parameters =  parameters_type_data->parameters();
3490         LIR_Opr mdp = LIR_OprFact::illegalOpr;
3491         bool has_receiver = !x->callee()->is_static();
3492         ciSignature* sig = x->callee()->signature();
3493         ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : nullptr);
3494         int i = 0; // to iterate on the Instructions
3495         Value arg = x->recv();
3496         bool not_null = false;
3497         int bci = x->bci_of_invoke();
3498         Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3499         // The first parameter is the receiver so that's what we start
3500         // with if it exists. One exception is method handle call to
3501         // virtual method: the receiver is in the args list
3502         if (arg == nullptr || !Bytecodes::has_receiver(bc)) {
3503           i = 1;
3504           arg = x->profiled_arg_at(0);
3505           not_null = !x->arg_needs_null_check(0);
3506         }
3507         int k = 0; // to iterate on the profile data
3508         for (;;) {
3509           intptr_t profiled_k = parameters->type(k);
3510           ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
3511                                         in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
3512                                         profiled_k, arg, mdp, not_null, sig_stream.next_klass(), nullptr);
3513           // If the profile is known statically set it once for all and do not emit any code
3514           if (exact != nullptr) {
3515             md->set_parameter_type(k, exact);
3516           }
3517           k++;
3518           if (k >= parameters_type_data->number_of_parameters()) {
3519 #ifdef ASSERT
3520             int extra = 0;
3521             if (MethodData::profile_arguments() && TypeProfileParmsLimit != -1 &&
3522                 x->nb_profiled_args() >= TypeProfileParmsLimit &&
3523                 x->recv() != nullptr && Bytecodes::has_receiver(bc)) {
3524               extra += 1;
3525             }
3526             assert(i == x->nb_profiled_args() - extra || (TypeProfileParmsLimit != -1 && TypeProfileArgsLimit > TypeProfileParmsLimit), "unused parameters?");
3527 #endif
3528             break;
3529           }
3530           arg = x->profiled_arg_at(i);
3531           not_null = !x->arg_needs_null_check(i);
3532           i++;
3533         }
3534       }
3535     }
3536   }
3537 }
3538 
3539 void LIRGenerator::do_ProfileCall(ProfileCall* x) {
3540   // Need recv in a temporary register so it interferes with the other temporaries
3541   LIR_Opr recv = LIR_OprFact::illegalOpr;
3542   LIR_Opr mdo = new_register(T_METADATA);
3543   // tmp is used to hold the counters on SPARC
3544   LIR_Opr tmp = new_pointer_register();
3545 
3546   if (x->nb_profiled_args() > 0) {
3547     profile_arguments(x);
3548   }
3549 
3550   // profile parameters on inlined method entry including receiver
3551   if (x->recv() != nullptr || x->nb_profiled_args() > 0) {
3552     profile_parameters_at_call(x);
3553   }
3554 
3555   if (x->recv() != nullptr) {
3556     LIRItem value(x->recv(), this);
3557     value.load_item();
3558     recv = new_register(T_OBJECT);
3559     __ move(value.result(), recv);
3560   }
3561   __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3562 }
3563 
3564 void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
3565   int bci = x->bci_of_invoke();
3566   ciMethodData* md = x->method()->method_data_or_null();
3567   assert(md != nullptr, "Sanity");
3568   ciProfileData* data = md->bci_to_data(bci);
3569   if (data != nullptr) {
3570     assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
3571     ciSingleTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
3572     LIR_Opr mdp = LIR_OprFact::illegalOpr;
3573 
3574     bool ignored_will_link;
3575     ciSignature* signature_at_call = nullptr;
3576     x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3577 
3578     // The offset within the MDO of the entry to update may be too large
3579     // to be used in load/store instructions on some platforms. So have
3580     // profile_type() compute the address of the profile in a register.
3581     ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
3582         ret->type(), x->ret(), mdp,
3583         !x->needs_null_check(),
3584         signature_at_call->return_type()->as_klass(),
3585         x->callee()->signature()->return_type()->as_klass());
3586     if (exact != nullptr) {
3587       md->set_return_type(bci, exact);
3588     }
3589   }
3590 }
3591 
3592 bool LIRGenerator::profile_inline_klass(ciMethodData* md, ciProfileData* data, Value value, int flag) {
3593   ciKlass* klass = value->as_loaded_klass_or_null();
3594   if (klass != nullptr) {
3595     if (klass->is_inlinetype()) {
3596       profile_flags(md, data, flag, lir_cond_always);
3597     } else if (klass->can_be_inline_klass()) {
3598       return false;
3599     }
3600   } else {
3601     return false;
3602   }
3603   return true;
3604 }
3605 
3606 
3607 void LIRGenerator::do_ProfileACmpTypes(ProfileACmpTypes* x) {
3608   ciMethod* method = x->method();
3609   assert(method != nullptr, "method should be set if branch is profiled");
3610   ciMethodData* md = method->method_data_or_null();
3611   assert(md != nullptr, "Sanity");
3612   ciProfileData* data = md->bci_to_data(x->bci());
3613   assert(data != nullptr, "must have profiling data");
3614   assert(data->is_ACmpData(), "need BranchData for two-way branches");
3615   ciACmpData* acmp = (ciACmpData*)data;
3616   LIR_Opr mdp = LIR_OprFact::illegalOpr;
3617   profile_type(md, md->byte_offset_of_slot(acmp, ACmpData::left_offset()), 0,
3618                acmp->left()->type(), x->left(), mdp, !x->left_maybe_null(), nullptr, nullptr);
3619   int flags_offset = md->byte_offset_of_slot(data, DataLayout::flags_offset());
3620   if (!profile_inline_klass(md, acmp, x->left(), ACmpData::left_inline_type_byte_constant())) {
3621     LIR_Opr mdp = new_register(T_METADATA);
3622     __ metadata2reg(md->constant_encoding(), mdp);
3623     LIRItem value(x->left(), this);
3624     value.load_item();
3625     __ profile_inline_type(new LIR_Address(mdp, flags_offset, T_INT), value.result(), ACmpData::left_inline_type_byte_constant(), new_register(T_INT), !x->left_maybe_null());
3626   }
3627   profile_type(md, md->byte_offset_of_slot(acmp, ACmpData::left_offset()),
3628                in_bytes(ACmpData::right_offset()) - in_bytes(ACmpData::left_offset()),
3629                acmp->right()->type(), x->right(), mdp, !x->right_maybe_null(), nullptr, nullptr);
3630   if (!profile_inline_klass(md, acmp, x->right(), ACmpData::right_inline_type_byte_constant())) {
3631     LIR_Opr mdp = new_register(T_METADATA);
3632     __ metadata2reg(md->constant_encoding(), mdp);
3633     LIRItem value(x->right(), this);
3634     value.load_item();
3635     __ profile_inline_type(new LIR_Address(mdp, flags_offset, T_INT), value.result(), ACmpData::right_inline_type_byte_constant(), new_register(T_INT), !x->left_maybe_null());
3636   }
3637 }
3638 
3639 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
3640   // We can safely ignore accessors here, since c2 will inline them anyway,
3641   // accessors are also always mature.
3642   if (!x->inlinee()->is_accessor()) {
3643     CodeEmitInfo* info = state_for(x, x->state(), true);
3644     // Notify the runtime very infrequently only to take care of counter overflows
3645     int freq_log = Tier23InlineeNotifyFreqLog;
3646     double scale;
3647     if (_method->has_option_value(CompileCommandEnum::CompileThresholdScaling, scale)) {
3648       freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
3649     }
3650     increment_event_counter_impl(info, x->inlinee(), LIR_OprFact::intConst(InvocationCounter::count_increment), right_n_bits(freq_log), InvocationEntryBci, false, true);
3651   }
3652 }
3653 
3654 void LIRGenerator::increment_backedge_counter_conditionally(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info, int left_bci, int right_bci, int bci) {
3655   if (compilation()->is_profiling()) {
3656 #if defined(X86) && !defined(_LP64)
3657     // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
3658     LIR_Opr left_copy = new_register(left->type());
3659     __ move(left, left_copy);
3660     __ cmp(cond, left_copy, right);
3661 #else
3662     __ cmp(cond, left, right);
3663 #endif
3664     LIR_Opr step = new_register(T_INT);
3665     LIR_Opr plus_one = LIR_OprFact::intConst(InvocationCounter::count_increment);
3666     LIR_Opr zero = LIR_OprFact::intConst(0);
3667     __ cmove(cond,
3668         (left_bci < bci) ? plus_one : zero,
3669         (right_bci < bci) ? plus_one : zero,
3670         step, left->type());
3671     increment_backedge_counter(info, step, bci);
3672   }
3673 }
3674 
3675 
3676 void LIRGenerator::increment_event_counter(CodeEmitInfo* info, LIR_Opr step, int bci, bool backedge) {
3677   int freq_log = 0;
3678   int level = compilation()->env()->comp_level();
3679   if (level == CompLevel_limited_profile) {
3680     freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
3681   } else if (level == CompLevel_full_profile) {
3682     freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
3683   } else {
3684     ShouldNotReachHere();
3685   }
3686   // Increment the appropriate invocation/backedge counter and notify the runtime.
3687   double scale;
3688   if (_method->has_option_value(CompileCommandEnum::CompileThresholdScaling, scale)) {
3689     freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
3690   }
3691   increment_event_counter_impl(info, info->scope()->method(), step, right_n_bits(freq_log), bci, backedge, true);
3692 }
3693 
3694 void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
3695                                                 ciMethod *method, LIR_Opr step, int frequency,
3696                                                 int bci, bool backedge, bool notify) {
3697   assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be x^2 - 1 or 0");
3698   int level = _compilation->env()->comp_level();
3699   assert(level > CompLevel_simple, "Shouldn't be here");
3700 
3701   int offset = -1;
3702   LIR_Opr counter_holder;
3703   if (level == CompLevel_limited_profile) {
3704     MethodCounters* counters_adr = method->ensure_method_counters();
3705     if (counters_adr == nullptr) {
3706       bailout("method counters allocation failed");
3707       return;
3708     }
3709     counter_holder = new_pointer_register();
3710     __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
3711     offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
3712                                  MethodCounters::invocation_counter_offset());
3713   } else if (level == CompLevel_full_profile) {
3714     counter_holder = new_register(T_METADATA);
3715     offset = in_bytes(backedge ? MethodData::backedge_counter_offset() :
3716                                  MethodData::invocation_counter_offset());
3717     ciMethodData* md = method->method_data_or_null();
3718     assert(md != nullptr, "Sanity");
3719     __ metadata2reg(md->constant_encoding(), counter_holder);
3720   } else {
3721     ShouldNotReachHere();
3722   }
3723   LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
3724   LIR_Opr result = new_register(T_INT);
3725   __ load(counter, result);
3726   __ add(result, step, result);
3727   __ store(result, counter);
3728   if (notify && (!backedge || UseOnStackReplacement)) {
3729     LIR_Opr meth = LIR_OprFact::metadataConst(method->constant_encoding());
3730     // The bci for info can point to cmp for if's we want the if bci
3731     CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
3732     int freq = frequency << InvocationCounter::count_shift;
3733     if (freq == 0) {
3734       if (!step->is_constant()) {
3735         __ cmp(lir_cond_notEqual, step, LIR_OprFact::intConst(0));
3736         __ branch(lir_cond_notEqual, overflow);
3737       } else {
3738         __ branch(lir_cond_always, overflow);
3739       }
3740     } else {
3741       LIR_Opr mask = load_immediate(freq, T_INT);
3742       if (!step->is_constant()) {
3743         // If step is 0, make sure the overflow check below always fails
3744         __ cmp(lir_cond_notEqual, step, LIR_OprFact::intConst(0));
3745         __ cmove(lir_cond_notEqual, result, LIR_OprFact::intConst(InvocationCounter::count_increment), result, T_INT);
3746       }
3747       __ logical_and(result, mask, result);
3748       __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
3749       __ branch(lir_cond_equal, overflow);
3750     }
3751     __ branch_destination(overflow->continuation());
3752   }
3753 }
3754 
3755 void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
3756   LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
3757   BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());
3758 
3759   if (x->pass_thread()) {
3760     signature->append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
3761     args->append(getThreadPointer());
3762   }
3763 
3764   for (int i = 0; i < x->number_of_arguments(); i++) {
3765     Value a = x->argument_at(i);
3766     LIRItem* item = new LIRItem(a, this);
3767     item->load_item();
3768     args->append(item->result());
3769     signature->append(as_BasicType(a->type()));
3770   }
3771 
3772   LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), nullptr);
3773   if (x->type() == voidType) {
3774     set_no_result(x);
3775   } else {
3776     __ move(result, rlock_result(x));
3777   }
3778 }
3779 
3780 #ifdef ASSERT
3781 void LIRGenerator::do_Assert(Assert *x) {
3782   ValueTag tag = x->x()->type()->tag();
3783   If::Condition cond = x->cond();
3784 
3785   LIRItem xitem(x->x(), this);
3786   LIRItem yitem(x->y(), this);
3787   LIRItem* xin = &xitem;
3788   LIRItem* yin = &yitem;
3789 
3790   assert(tag == intTag, "Only integer assertions are valid!");
3791 
3792   xin->load_item();
3793   yin->dont_load_item();
3794 
3795   set_no_result(x);
3796 
3797   LIR_Opr left = xin->result();
3798   LIR_Opr right = yin->result();
3799 
3800   __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
3801 }
3802 #endif
3803 
3804 void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
3805 
3806 
3807   Instruction *a = x->x();
3808   Instruction *b = x->y();
3809   if (!a || StressRangeCheckElimination) {
3810     assert(!b || StressRangeCheckElimination, "B must also be null");
3811 
3812     CodeEmitInfo *info = state_for(x, x->state());
3813     CodeStub* stub = new PredicateFailedStub(info);
3814 
3815     __ jump(stub);
3816   } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
3817     int a_int = a->type()->as_IntConstant()->value();
3818     int b_int = b->type()->as_IntConstant()->value();
3819 
3820     bool ok = false;
3821 
3822     switch(x->cond()) {
3823       case Instruction::eql: ok = (a_int == b_int); break;
3824       case Instruction::neq: ok = (a_int != b_int); break;
3825       case Instruction::lss: ok = (a_int < b_int); break;
3826       case Instruction::leq: ok = (a_int <= b_int); break;
3827       case Instruction::gtr: ok = (a_int > b_int); break;
3828       case Instruction::geq: ok = (a_int >= b_int); break;
3829       case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
3830       case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
3831       default: ShouldNotReachHere();
3832     }
3833 
3834     if (ok) {
3835 
3836       CodeEmitInfo *info = state_for(x, x->state());
3837       CodeStub* stub = new PredicateFailedStub(info);
3838 
3839       __ jump(stub);
3840     }
3841   } else {
3842 
3843     ValueTag tag = x->x()->type()->tag();
3844     If::Condition cond = x->cond();
3845     LIRItem xitem(x->x(), this);
3846     LIRItem yitem(x->y(), this);
3847     LIRItem* xin = &xitem;
3848     LIRItem* yin = &yitem;
3849 
3850     assert(tag == intTag, "Only integer deoptimizations are valid!");
3851 
3852     xin->load_item();
3853     yin->dont_load_item();
3854     set_no_result(x);
3855 
3856     LIR_Opr left = xin->result();
3857     LIR_Opr right = yin->result();
3858 
3859     CodeEmitInfo *info = state_for(x, x->state());
3860     CodeStub* stub = new PredicateFailedStub(info);
3861 
3862     __ cmp(lir_cond(cond), left, right);
3863     __ branch(lir_cond(cond), stub);
3864   }
3865 }
3866 
3867 void LIRGenerator::do_blackhole(Intrinsic *x) {
3868   assert(!x->has_receiver(), "Should have been checked before: only static methods here");
3869   for (int c = 0; c < x->number_of_arguments(); c++) {
3870     // Load the argument
3871     LIRItem vitem(x->argument_at(c), this);
3872     vitem.load_item();
3873     // ...and leave it unused.
3874   }
3875 }
3876 
3877 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
3878   LIRItemList args(1);
3879   LIRItem value(arg1, this);
3880   args.append(&value);
3881   BasicTypeList signature;
3882   signature.append(as_BasicType(arg1->type()));
3883 
3884   return call_runtime(&signature, &args, entry, result_type, info);
3885 }
3886 
3887 
3888 LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
3889   LIRItemList args(2);
3890   LIRItem value1(arg1, this);
3891   LIRItem value2(arg2, this);
3892   args.append(&value1);
3893   args.append(&value2);
3894   BasicTypeList signature;
3895   signature.append(as_BasicType(arg1->type()));
3896   signature.append(as_BasicType(arg2->type()));
3897 
3898   return call_runtime(&signature, &args, entry, result_type, info);
3899 }
3900 
3901 
3902 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
3903                                    address entry, ValueType* result_type, CodeEmitInfo* info) {
3904   // get a result register
3905   LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3906   LIR_Opr result = LIR_OprFact::illegalOpr;
3907   if (result_type->tag() != voidTag) {
3908     result = new_register(result_type);
3909     phys_reg = result_register_for(result_type);
3910   }
3911 
3912   // move the arguments into the correct location
3913   CallingConvention* cc = frame_map()->c_calling_convention(signature);
3914   assert(cc->length() == args->length(), "argument mismatch");
3915   for (int i = 0; i < args->length(); i++) {
3916     LIR_Opr arg = args->at(i);
3917     LIR_Opr loc = cc->at(i);
3918     if (loc->is_register()) {
3919       __ move(arg, loc);
3920     } else {
3921       LIR_Address* addr = loc->as_address_ptr();
3922 //           if (!can_store_as_constant(arg)) {
3923 //             LIR_Opr tmp = new_register(arg->type());
3924 //             __ move(arg, tmp);
3925 //             arg = tmp;
3926 //           }
3927       __ move(arg, addr);
3928     }
3929   }
3930 
3931   if (info) {
3932     __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3933   } else {
3934     __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3935   }
3936   if (result->is_valid()) {
3937     __ move(phys_reg, result);
3938   }
3939   return result;
3940 }
3941 
3942 
3943 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
3944                                    address entry, ValueType* result_type, CodeEmitInfo* info) {
3945   // get a result register
3946   LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3947   LIR_Opr result = LIR_OprFact::illegalOpr;
3948   if (result_type->tag() != voidTag) {
3949     result = new_register(result_type);
3950     phys_reg = result_register_for(result_type);
3951   }
3952 
3953   // move the arguments into the correct location
3954   CallingConvention* cc = frame_map()->c_calling_convention(signature);
3955 
3956   assert(cc->length() == args->length(), "argument mismatch");
3957   for (int i = 0; i < args->length(); i++) {
3958     LIRItem* arg = args->at(i);
3959     LIR_Opr loc = cc->at(i);
3960     if (loc->is_register()) {
3961       arg->load_item_force(loc);
3962     } else {
3963       LIR_Address* addr = loc->as_address_ptr();
3964       arg->load_for_store(addr->type());
3965       __ move(arg->result(), addr);
3966     }
3967   }
3968 
3969   if (info) {
3970     __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3971   } else {
3972     __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3973   }
3974   if (result->is_valid()) {
3975     __ move(phys_reg, result);
3976   }
3977   return result;
3978 }
3979 
3980 void LIRGenerator::do_MemBar(MemBar* x) {
3981   LIR_Code code = x->code();
3982   switch(code) {
3983   case lir_membar_acquire   : __ membar_acquire(); break;
3984   case lir_membar_release   : __ membar_release(); break;
3985   case lir_membar           : __ membar(); break;
3986   case lir_membar_loadload  : __ membar_loadload(); break;
3987   case lir_membar_storestore: __ membar_storestore(); break;
3988   case lir_membar_loadstore : __ membar_loadstore(); break;
3989   case lir_membar_storeload : __ membar_storeload(); break;
3990   default                   : ShouldNotReachHere(); break;
3991   }
3992 }
3993 
3994 LIR_Opr LIRGenerator::mask_boolean(LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
3995   LIR_Opr value_fixed = rlock_byte(T_BYTE);
3996   if (two_operand_lir_form) {
3997     __ move(value, value_fixed);
3998     __ logical_and(value_fixed, LIR_OprFact::intConst(1), value_fixed);
3999   } else {
4000     __ logical_and(value, LIR_OprFact::intConst(1), value_fixed);
4001   }
4002   LIR_Opr klass = new_register(T_METADATA);
4003   load_klass(array, klass, null_check_info);
4004   null_check_info = nullptr;
4005   LIR_Opr layout = new_register(T_INT);
4006   __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
4007   int diffbit = Klass::layout_helper_boolean_diffbit();
4008   __ logical_and(layout, LIR_OprFact::intConst(diffbit), layout);
4009   __ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0));
4010   __ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE);
4011   value = value_fixed;
4012   return value;
4013 }