/*
 * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciFlatArrayKlass.hpp"
#include "ci/ciInlineKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciUtilities.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

#ifndef PATCHED_ADDR
#define PATCHED_ADDR  (max_jint)
#endif

void PhiResolverState::reset() {
  _virtual_operands.clear();
  _other_operands.clear();
  _vreg_table.clear();
}


//--------------------------------------------------------------
// PhiResolver

// Resolves cycles:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r1           r1 := r2
//                     r2 := temp
// and orders moves:
//
//  r2 := r3  becomes  r1 := r2
//  r1 := r2           r2 := r3

PhiResolver::PhiResolver(LIRGenerator* gen)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _loop(nullptr)
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset();
}


void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}


// Traverse the assignment graph in depth-first order and generate moves in post order,
// i.e., for the two assignments  b := c, a := b  we start with node c:
// Call graph: move(null, c) -> move(c, b) -> move(b, a)
// Generates moves in this order: move b to a, then move c to b
// For the cycle  a := b, b := a  we start with node a:
// Call graph: move(null, a) -> move(a, b) -> move(b, a)
// Generates moves in this order: move b to temp, move a to b, move temp to a
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == nullptr, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != nullptr) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}


PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = virtual_operands().at(i);
    if (!node->visited()) {
      _loop = nullptr;
      move(nullptr, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate moves from non-virtual registers to arbitrary destinations
  for (i = other_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = other_operands().at(i);
    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}


ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, nullptr);
    assert(node == nullptr || node->operand() == opr, "");
    if (node == nullptr) {
      node = new ResolveNode(opr);
      vreg_table().at_put(vreg_num, node);
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}


void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}


//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), nullptr);
  }

  _result = opr;
}

void LIRItem::load_item() {
  assert(!_gen->in_conditional_code(), "LIRItem cannot be loaded in conditional code");

  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}


void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}

void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
#if !defined(ARM) && !defined(E500V2)
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      r = _gen->force_to_spill(r, reg->type());
    }
#endif
    __ move(r, reg);
    _result = reg;
  }
}

ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return nullptr;
}


jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != nullptr, "");
  assert(type()->as_IntConstant() != nullptr, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != nullptr, "");
  assert(type()->as_AddressConstant() != nullptr, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != nullptr, "");
  assert(type()->as_FloatConstant() != nullptr, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != nullptr, "");
  assert(type()->as_DoubleConstant() != nullptr, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != nullptr, "");
  assert(type()->as_LongConstant() != nullptr, "type check");
  return type()->as_LongConstant()->value();
}



//--------------------------------------------------------------


void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == nullptr, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}


void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Opr for unpinned constants shouldn't be referenced by other
  // blocks so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}


void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != nullptr; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(nullptr);
  block_do_epilog(block);
}


//-------------------------LIRGenerator-----------------------------

// This is where the tree-walk starts; instr must be a root.
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != nullptr || bailed_out(), "invalid item set");
}


// This is called for each node in the tree; the walk stops if a root is reached
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop the walk when we encounter a root
  if ((instr->is_pinned() && instr->as_Phi() == nullptr) || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != nullptr, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != nullptr, "leaf instruction must have a use");
  }
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  assert(state != nullptr, "state must be defined");

#ifndef PRODUCT
  state->verify();
#endif

  ValueStack* s = state;
  for_each_state(s) {
    if (s->kind() == ValueStack::EmptyExceptionState) {
      assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
      continue;
    }

    int index;
    Value value;
    for_each_stack_value(s, index, value) {
      assert(value->subst() == value, "missed substitution");
      if (!value->is_pinned() && value->as_Constant() == nullptr && value->as_Local() == nullptr) {
        walk(value);
        assert(value->operand()->is_valid(), "must be evaluated now");
      }
    }

    int bci = s->bci();
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == nullptr && value->as_Local() == nullptr) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // null out this local so that linear scan can assume that all non-null values are live.
          s->invalidate_local(index);
        }
      }
    }
  }

  return new CodeEmitInfo(state, ignore_xhandler ? nullptr : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->exception_state());
}


void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) {
  /* C2 relies on constant pool entries being resolved (ciTypeFlow), so if tiered compilation
   * is active and the class hasn't yet been resolved we need to emit a patch that resolves
   * the class. */
  if ((!CompilerConfig::is_c1_only_no_jvmci() && need_resolve) || !obj->is_loaded() || PatchALot) {
    assert(info != nullptr, "info must be set if class is not loaded");
    __ klass2reg_patch(nullptr, r, info);
  } else {
    // no patching needed
    __ metadata2reg(obj->constant_encoding(), r);
  }
}


void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                    CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index, array);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, stub); // forward branch
  }
}
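
// For example (a sketch of the emitted LIR, not an exact listing): with a
// constant index the length field is compared against the constant,
//   cmp  [array + length_offset], index      // unsigned compare
//   branch(belowEqual, RangeCheckStub)       // length <= index -> stub
// while a register index is compared against the length,
//   cmp  index, [array + length_offset]
//   branch(aboveEqual, RangeCheckStub)       // index >= length -> stub
// In the register case the unsigned condition also catches a negative index.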

void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

  if (two_operand_lir_form && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd:  __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul:  __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:  __ mul(left_op, right_op, result_op, tmp_op); break;

    case Bytecodes::_imul:
      {
        bool did_strength_reduce = false;

        if (right->is_constant()) {
          jint c = right->as_jint();
          if (c > 0 && is_power_of_2(c)) {
            // do not need tmp here
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv: __ div(left_op, right_op, result_op, tmp_op); break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}
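
// Strength reduction example (a sketch): for  x * 8  the constant is a
// positive power of two, so instead of a multiply the generator emits
//   shift_left(x, 3, result)
// Other constants are first offered to strength_reduce_multiply() and only
// fall back to a real multiply if that fails.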


void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, tmp);
}


void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {

  if (two_operand_lir_form && value != result_op
      // Only 32bit right shifts require two operand form on S390.
      S390_ONLY(&& (code == Bytecodes::_ishr || code == Bytecodes::_iushr))) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
  case Bytecodes::_ishl:
  case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
  case Bytecodes::_ishr:
  case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
  case Bytecodes::_iushr:
  case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
  default: ShouldNotReachHere();
  }
}
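
// On a two-operand machine (e.g. x86) a shift with value != result_op is
// emitted roughly as (a sketch):
//   move(value, result_op)
//   shift(result_op, count, result_op)
// which is why count must not be the same operand as result_op.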


void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (two_operand_lir_form && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_iand:
    case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;

    case Bytecodes::_ior:
    case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;

    case Bytecodes::_ixor:
    case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no,
                                 CodeEmitInfo* info_for_exception, CodeEmitInfo* info, CodeStub* throw_imse_stub) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info, throw_imse_stub, scratch);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception, throw_imse_stub);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, LockingMode != LM_MONITOR, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, scratch, slow_path);
}

#ifndef PRODUCT
void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
  if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", new_instance->printable_bci());
  } else if (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_jvmci() && new_instance->is_unresolved())) {
    tty->print_cr("   ###class not resolved at new bci %d", new_instance->printable_bci());
  }
}
#endif

void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, bool allow_inline, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  if (allow_inline) {
    assert(!is_unresolved && klass->is_loaded(), "inline type klass should be resolved");
    __ metadata2reg(klass->constant_encoding(), klass_reg);
  } else {
    klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
  }
  // If klass is not loaded we do not know if the klass has finalizers or is an unexpected inline klass
  if (UseFastNewInstance && klass->is_loaded() && (allow_inline || !klass->is_inlinetype())
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() > 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, allow_inline ? Runtime1::new_instance_id : Runtime1::new_instance_no_inline_id);
    __ jump(slow_path);
    __ branch_destination(slow_path->continuation());
  }
}


static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != nullptr && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return nullptr;
  }
}

static ciType* phi_declared_type(Phi* phi) {
  ciType* t = phi->operand_at(0)->declared_type();
  if (t == nullptr) {
    return nullptr;
  }
  for(int i = 1; i < phi->operand_count(); i++) {
    if (t != phi->operand_at(i)->declared_type()) {
      return nullptr;
    }
  }
  return t;
}

void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = nullptr;
  bool is_exact = false, src_objarray = false, dst_objarray = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    Phi* phi;
    if (src_declared_type == nullptr && (phi = src->as_Phi()) != nullptr) {
      src_declared_type = as_array_klass(phi_declared_type(phi));
    }
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (dst_declared_type == nullptr && (phi = dst->as_Phi()) != nullptr) {
      dst_declared_type = as_array_klass(phi_declared_type(phi));
    }

    if (src_exact_type != nullptr && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != nullptr && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = nullptr;
      if (src_exact_type != nullptr && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != nullptr && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != nullptr) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == nullptr) expected_type = dst_exact_type;
    if (expected_type == nullptr) expected_type = src_declared_type;
    if (expected_type == nullptr) expected_type = dst_declared_type;

    src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
    dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;

  if (!src->is_loaded_flat_array() && !dst->is_loaded_flat_array()) {
    flags &= ~LIR_OpArrayCopy::always_slow_path;
  }
  if (!src->maybe_flat_array()) {
    flags &= ~LIR_OpArrayCopy::src_inlinetype_check;
  }
  if (!dst->maybe_flat_array() && !dst->maybe_null_free_array()) {
    flags &= ~LIR_OpArrayCopy::dst_inlinetype_check;
  }

  if (!src_objarray)
    flags &= ~LIR_OpArrayCopy::src_objarray;
  if (!dst_objarray)
    flags &= ~LIR_OpArrayCopy::dst_objarray;

  if (!x->arg_needs_null_check(0))
    flags &= ~LIR_OpArrayCopy::src_null_check;
  if (!x->arg_needs_null_check(2))
    flags &= ~LIR_OpArrayCopy::dst_null_check;


  if (expected_type != nullptr) {
    Value length_limit = nullptr;

    IfOp* ifop = length->as_IfOp();
    if (ifop != nullptr) {
      // look for expressions like min(v, a.length) which end up as
      //   x > y ? y : x  or  x >= y ? y : x
      if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
          ifop->x() == ifop->fval() &&
          ifop->y() == ifop->tval()) {
        length_limit = ifop->y();
      }
    }

    // try to skip null checks and range checks
    NewArray* src_array = src->as_NewArray();
    if (src_array != nullptr) {
      flags &= ~LIR_OpArrayCopy::src_null_check;
      if (length_limit != nullptr &&
          src_array->length() == length_limit &&
          is_constant_zero(src_pos)) {
        flags &= ~LIR_OpArrayCopy::src_range_check;
      }
    }

    NewArray* dst_array = dst->as_NewArray();
    if (dst_array != nullptr) {
      flags &= ~LIR_OpArrayCopy::dst_null_check;
      if (length_limit != nullptr &&
          dst_array->length() == length_limit &&
          is_constant_zero(dst_pos)) {
        flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != nullptr) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  IntConstant* src_int = src_pos->type()->as_IntConstant();
  IntConstant* dst_int = dst_pos->type()->as_IntConstant();
  if (src_int && dst_int) {
    int s_offs = src_int->value();
    int d_offs = dst_int->value();
    if (src_int->value() >= dst_int->value()) {
      flags &= ~LIR_OpArrayCopy::overlapping;
    }
    if (expected_type != nullptr) {
      BasicType t = expected_type->element_type()->basic_type();
      int element_size = type2aelembytes(t);
      if (((arrayOopDesc::base_offset_in_bytes(t) + (uint)s_offs * element_size) % HeapWordSize == 0) &&
          ((arrayOopDesc::base_offset_in_bytes(t) + (uint)d_offs * element_size) % HeapWordSize == 0)) {
        flags &= ~LIR_OpArrayCopy::unaligned;
      }
    }
  } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
    // src and dst positions are the same, or the dst position is zero,
    // so assume a non-overlapping copy.
    flags &= ~LIR_OpArrayCopy::overlapping;
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}
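
// As a concrete example (a sketch, not an exhaustive list of elided checks):
// for  System.arraycopy(src, 0, dst, 0, src.length)  where dst was just
// created by  new T[n], the helper can drop the null checks for both arrays,
// the positive checks for the positions and the length, the src range check
// and the overlap flag, leaving essentially the dst range check (plus a type
// check unless the array types are known to match exactly).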


LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (strict_fp_requires_explicit_rounding) {
#ifdef IA32
    if (UseSSE < 1 && opr->is_single_fpu()) {
      LIR_Opr result = new_register(T_FLOAT);
      set_vreg_flag(result, must_start_in_memory);
      assert(opr->is_register(), "only a register can be spilled");
      assert(opr->value_type()->is_float(), "rounding only for floats available");
      __ roundfp(opr, LIR_OprFact::illegalOpr, result);
      return result;
    }
#else
    Unimplemented();
#endif // IA32
  }
  return opr;
}


LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()],
         "size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type()));
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}

void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != nullptr, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != nullptr, "Sanity");
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != nullptr, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }

    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);

    LIR_Opr data_offset_reg = new_pointer_register();
    __ cmove(lir_cond(cond),
             LIR_OprFact::intptrConst(taken_count_offset),
             LIR_OprFact::intptrConst(not_taken_count_offset),
             data_offset_reg, as_BasicType(if_instr->x()->type()));

    // MDO cells are intptr_t, so the data_reg width is arch-dependent.
    LIR_Opr data_reg = new_pointer_register();
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
    __ move(data_addr, data_reg);
    // Use leal instead of add to avoid destroying condition codes on x86
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, data_addr);
  }
}

// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated from Java it is rather rare that more than one
// value is on the stack from one basic block to the next.
// We optimize our technique for efficient passing of one value
// (of type long, int, double, ...) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the
// expression stack, is passed in registers. All other values are
// stored in the spilling area. Every Phi has an index which designates
// its spill slot.
// At exit of a basic block, we fill the register(s) and spill slots.
// At entry of a basic block, the block_prolog sets up the content of phi nodes
// and locks necessary registers and spilling slots.
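//
// A small example (a sketch): for
//   x = cond ? a : b;
// each predecessor of the join block leaves its value on TOS and moves it
// into the operand assigned to the phi (a register, since the value is
// topmost) before branching; the join block then finds the value already
// in place. Additional live values would travel through their phis' spill
// slots instead.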


// move current value to referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != nullptr && cur_val != nullptr && cur_val != phi && !phi->is_illegal()) {
    if (phi->is_local()) {
      for (int i = 0; i < phi->operand_count(); i++) {
        Value op = phi->operand_at(i);
        if (op != nullptr && op->type()->is_illegal()) {
          bailout("illegal phi operand");
        }
      }
    }
    Phi* cur_phi = cur_val->as_Phi();
    if (cur_phi != nullptr && cur_phi->is_illegal()) {
      // Phi and local would need to get invalidated
      // (which is unexpected for Linear Scan).
      // But this case is very rare so we simply bail out.
      bailout("propagation of illegal phi");
      return;
    }
    LIR_Opr operand = cur_val->operand();
    if (operand->is_illegal()) {
      assert(cur_val->as_Constant() != nullptr || cur_val->as_Local() != nullptr,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}


// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      PhiResolver resolver(this);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      assert(cur_state->scope() == sux_state->scope(), "not matching");
      assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
      assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}


LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg_num = _virtual_register_number;
  // Add a little fudge factor for the bailout since the bailout is only checked periodically. This allows us to hand out
  // a few extra registers before we really run out, which helps avoid tripping over assertions.
  if (vreg_num + 20 >= LIR_Opr::vreg_max) {
    bailout("out of virtual registers in LIR generator");
    if (vreg_num + 2 >= LIR_Opr::vreg_max) {
      // Wrap it around and continue until the bailout really happens, to avoid hitting assertions.
      _virtual_register_number = LIR_Opr::vreg_base;
      vreg_num = LIR_Opr::vreg_base;
    }
  }
  _virtual_register_number += 1;
  LIR_Opr vreg = LIR_OprFact::virtual_register(vreg_num, type);
  assert(vreg != LIR_OprFact::illegal(), "ran out of virtual registers");
  return vreg;
}


// Try to lock using register in hint
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
  case T_BYTE:
  case T_BOOLEAN:
    reg = rlock_byte(type);
    break;
  default:
    reg = rlock(x);
    break;
  }

  set_result(x, reg);
  return reg;
}


//---------------------------------------------------------------------
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return nullptr;
}


void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the beginning of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   if (!phi->is_illegal()) { operand_for_instruction(phi); });

  LIR_Opr thread_reg = getThreadPointer();
  __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
               exceptionOopOpr());
  __ move_wide(LIR_OprFact::oopConst(nullptr),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move_wide(LIR_OprFact::oopConst(nullptr),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}


//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//                        visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}


// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state_before() != nullptr) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state_before());
    __ oop2reg_patch(nullptr, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block.  After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}
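
// Example of the policy above (a sketch): an int constant used once is left
// as a constant operand and folded into its use; the same constant used
// several times within one block, and not inlinable, is loaded into a
// register once via load_constant(), and block_do_epilog() clears that
// operand again so later blocks cannot refer to the register.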


void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
    signature.append(T_METADATA); // Method*
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_METADATA);
    __ metadata2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, nullptr);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}

// Example: ref.get()
// Combination of LoadField and G1 pre-write barrier
void LIRGenerator::do_Reference_get(Intrinsic* x) {

  const int referent_offset = java_lang_ref_Reference::referent_offset();

  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem reference(x->argument_at(0), this);
  reference.load_item();

  // need to perform the null check on the reference object
  CodeEmitInfo* info = nullptr;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Opr result = rlock_result(x, T_OBJECT);
  access_load_at(IN_HEAP | ON_WEAK_OOP_REF, T_OBJECT,
                 reference, LIR_OprFact::intConst(referent_offset), result,
                 nullptr, info);
}

// Example: clazz.isInstance(object)
void LIRGenerator::do_isInstance(Intrinsic* x) {
  assert(x->number_of_arguments() == 2, "wrong type");

  // TODO could try to substitute this node with an equivalent InstanceOf
  // if clazz is known to be a constant Class. This will pick up newly found
  // constants after HIR construction. I'll leave this to a future change.

  // as a first cut, make a simple leaf call to runtime to stay platform independent.
  // could follow the aastore example in a future change.

  LIRItem clazz(x->argument_at(0), this);
  LIRItem object(x->argument_at(1), this);
  clazz.load_item();
  object.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform null check on clazz
  if (x->needs_null_check()) {
    CodeEmitInfo* info = state_for(x);
    __ null_check(clazz.result(), info);
  }

  LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
                                     CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
                                     x->type(),
                                     nullptr); // null CodeEmitInfo results in a leaf call
  __ move(call_result, result);
}

void LIRGenerator::load_klass(LIR_Opr obj, LIR_Opr klass, CodeEmitInfo* null_check_info) {
  __ load_klass(obj, klass, null_check_info);
}

// Example: object.getClass()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr temp = new_register(T_ADDRESS);
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = nullptr;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Opr klass = new_register(T_METADATA);
  load_klass(rcvr.result(), klass, info);
  __ move_wide(new LIR_Address(klass, in_bytes(Klass::java_mirror_offset()), T_ADDRESS), temp);
  // mirror = ((OopHandle)mirror)->resolve();
  access_load(IN_NATIVE, T_OBJECT,
              LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), result);
}

// java.lang.Class::isPrimitive()
void LIRGenerator::do_isPrimitive(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr temp = new_register(T_METADATA);
  LIR_Opr result = rlock_result(x);

  CodeEmitInfo* info = nullptr;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  __ move(new LIR_Address(rcvr.result(), java_lang_Class::klass_offset(), T_ADDRESS), temp, info);
  __ cmp(lir_cond_notEqual, temp, LIR_OprFact::metadataConst(0));
  __ cmove(lir_cond_notEqual, LIR_OprFact::intConst(0), LIR_OprFact::intConst(1), result, T_BOOLEAN);
}

// Example: Foo.class.getModifiers()
void LIRGenerator::do_getModifiers(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem receiver(x->argument_at(0), this);
  receiver.load_item();
  LIR_Opr result = rlock_result(x);

  CodeEmitInfo* info = nullptr;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  // While reading off the universal constant mirror is less efficient than doing
  // another branch and returning the constant answer, this branchless code runs into
  // much less risk of confusing the C1 register allocator. The choice of the universe
  // object here is correct as long as it returns the same modifiers we would expect
  // from the primitive class itself. See the spec for Class.getModifiers, which gives
  // the typed array klasses modifiers similar to those of their component types.

  Klass* univ_klass_obj = Universe::byteArrayKlassObj();
  assert(univ_klass_obj->modifier_flags() == (JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC), "Sanity");
  LIR_Opr prim_klass = LIR_OprFact::metadataConst(univ_klass_obj);

  LIR_Opr recv_klass = new_register(T_METADATA);
  __ move(new LIR_Address(receiver.result(), java_lang_Class::klass_offset(), T_ADDRESS), recv_klass, info);

  // Check if this is a Java mirror of primitive type, and select the appropriate klass.
  LIR_Opr klass = new_register(T_METADATA);
  __ cmp(lir_cond_equal, recv_klass, LIR_OprFact::metadataConst(0));
  __ cmove(lir_cond_equal, prim_klass, recv_klass, klass, T_ADDRESS);

  // Get the answer.
  __ move(new LIR_Address(klass, in_bytes(Klass::modifier_flags_offset()), T_INT), result);
}

void LIRGenerator::do_getObjectSize(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  LIR_Opr result_reg = rlock_result(x);

  LIRItem value(x->argument_at(2), this);
  value.load_item();

  LIR_Opr klass = new_register(T_METADATA);
  load_klass(value.result(), klass, nullptr);
  LIR_Opr layout = new_register(T_INT);
  __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);

  LabelObj* L_done = new LabelObj();
  LabelObj* L_array = new LabelObj();

  __ cmp(lir_cond_lessEqual, layout, 0);
  __ branch(lir_cond_lessEqual, L_array->label());

  // Instance case: the layout helper gives us instance size almost directly,
  // but we need to mask out the _lh_instance_slow_path_bit.

  assert((int) Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");

  LIR_Opr mask = load_immediate(~(jint) right_n_bits(LogBytesPerLong), T_INT);
  __ logical_and(layout, mask, layout);
  __ convert(Bytecodes::_i2l, layout, result_reg);

  __ branch(lir_cond_always, L_done->label());

  // Array case: size is round(header + element_size*arraylength).
  // Since arraylength is different for every array instance, we have to
  // compute the whole thing at runtime.

  __ branch_destination(L_array->label());

  int round_mask = MinObjAlignmentInBytes - 1;

  // Figure out header sizes first.
  LIR_Opr hss = load_immediate(Klass::_lh_header_size_shift, T_INT);
  LIR_Opr hsm = load_immediate(Klass::_lh_header_size_mask, T_INT);

  LIR_Opr header_size = new_register(T_INT);
  __ move(layout, header_size);
  LIR_Opr tmp = new_register(T_INT);
  __ unsigned_shift_right(header_size, hss, header_size, tmp);
  __ logical_and(header_size, hsm, header_size);
  __ add(header_size, LIR_OprFact::intConst(round_mask), header_size);

  // Figure out the array length in bytes
  assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
  LIR_Opr l2esm = load_immediate(Klass::_lh_log2_element_size_mask, T_INT);
  __ logical_and(layout, l2esm, layout);

  LIR_Opr length_int = new_register(T_INT);
  __ move(new LIR_Address(value.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), length_int);

#ifdef _LP64
  LIR_Opr length = new_register(T_LONG);
  __ convert(Bytecodes::_i2l, length_int, length);
#endif

  // Shift-left awkwardness. Normally it is just:
  //   __ shift_left(length, layout, length);
  // But C1 cannot perform shift_left with non-constant count, so we end up
  // doing the per-bit loop dance here. x86_32 also does not know how to shift
  // longs, so we have to act on ints.
  LabelObj* L_shift_loop = new LabelObj();
  LabelObj* L_shift_exit = new LabelObj();

  __ branch_destination(L_shift_loop->label());
  __ cmp(lir_cond_equal, layout, 0);
  __ branch(lir_cond_equal, L_shift_exit->label());

#ifdef _LP64
  __ shift_left(length, 1, length);
#else
  __ shift_left(length_int, 1, length_int);
#endif

  __ sub(layout, LIR_OprFact::intConst(1), layout);

  __ branch(lir_cond_always, L_shift_loop->label());
  __ branch_destination(L_shift_exit->label());

  // Mix all up, round, and push to the result.
#ifdef _LP64
  LIR_Opr header_size_long = new_register(T_LONG);
  __ convert(Bytecodes::_i2l, header_size, header_size_long);
  __ add(length, header_size_long, length);
  if (round_mask != 0) {
    LIR_Opr round_mask_opr = load_immediate(~(jlong)round_mask, T_LONG);
    __ logical_and(length, round_mask_opr, length);
  }
  __ move(length, result_reg);
#else
  __ add(length_int, header_size, length_int);
  if (round_mask != 0) {
    LIR_Opr round_mask_opr = load_immediate(~round_mask, T_INT);
    __ logical_and(length_int, round_mask_opr, length_int);
  }
  __ convert(Bytecodes::_i2l, length_int, result_reg);
#endif

  __ branch_destination(L_done->label());
}
1452 
1453 void LIRGenerator::do_scopedValueCache(Intrinsic* x) {
1454   do_JavaThreadField(x, JavaThread::scopedValueCache_offset());
1455 }
1456 
1457 // Example: Thread.currentCarrierThread()
1458 void LIRGenerator::do_currentCarrierThread(Intrinsic* x) {
1459   do_JavaThreadField(x, JavaThread::threadObj_offset());
1460 }
1461 
1462 void LIRGenerator::do_vthread(Intrinsic* x) {
1463   do_JavaThreadField(x, JavaThread::vthread_offset());
1464 }
1465 
1466 void LIRGenerator::do_JavaThreadField(Intrinsic* x, ByteSize offset) {
1467   assert(x->number_of_arguments() == 0, "wrong type");
1468   LIR_Opr temp = new_register(T_ADDRESS);
1469   LIR_Opr reg = rlock_result(x);
1470   __ move(new LIR_Address(getThreadPointer(), in_bytes(offset), T_ADDRESS), temp);
1471   access_load(IN_NATIVE, T_OBJECT,
1472               LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), reg);
1473 }
1474 
1475 void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
1476   assert(x->number_of_arguments() == 1, "wrong type");
1477   LIRItem receiver(x->argument_at(0), this);
1478 
1479   receiver.load_item();
1480   BasicTypeList signature;
1481   signature.append(T_OBJECT); // receiver
1482   LIR_OprList* args = new LIR_OprList();
1483   args->append(receiver.result());
1484   CodeEmitInfo* info = state_for(x, x->state());
1485   call_runtime(&signature, args,
1486                CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
1487                voidType, info);
1488 
1489   set_no_result(x);
1490 }
1491 
1492 
1493 //------------------------local access--------------------------------------
1494 
1495 LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
1496   if (x->operand()->is_illegal()) {
1497     Constant* c = x->as_Constant();
1498     if (c != nullptr) {
1499       x->set_operand(LIR_OprFact::value_type(c->type()));
1500     } else {
1501       assert(x->as_Phi() || x->as_Local() != nullptr, "only for Phi and Local");
1502       // allocate a virtual register for this local or phi
1503       x->set_operand(rlock(x));
1504       _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, nullptr);
1505     }
1506   }
1507   return x->operand();
1508 }
1509 
1510 
1511 Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
1512   if (opr->is_virtual()) {
1513     return instruction_for_vreg(opr->vreg_number());
1514   }
1515   return nullptr;
1516 }
1517 
1518 
1519 Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
1520   if (reg_num < _instruction_for_operand.length()) {
1521     return _instruction_for_operand.at(reg_num);
1522   }
1523   return nullptr;
1524 }
1525 
1526 
1527 void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
1528   if (_vreg_flags.size_in_bits() == 0) {
1529     BitMap2D temp(100, num_vreg_flags);
1530     _vreg_flags = temp;
1531   }
1532   _vreg_flags.at_put_grow(vreg_num, f, true);
1533 }
1534 
1535 bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
1536   if (!_vreg_flags.is_valid_index(vreg_num, f)) {
1537     return false;
1538   }
1539   return _vreg_flags.at(vreg_num, f);
1540 }
1541 
1542 
1543 // Block local constant handling.  This code is useful for keeping
1544 // unpinned constants and constants which aren't exposed in the IR in
1545 // registers.  Unpinned Constant instructions have their operands
1546 // cleared when the block is finished so that other blocks can't end
1547 // up referring to their registers.
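//
// For example, two uses of the constant 42 inside one block share a single
// virtual register via load_constant() below; because the cache (_constants
// and _reg_for_constants) is dropped at the end of the block, that register
// is never visible to other blocks.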

LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}


LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      switch (t) {
      case T_INT:
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      default:
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  if (!in_conditional_code()) {
    _constants.append(c);
    _reg_for_constants.append(result);
  }
  return result;
}

void LIRGenerator::set_in_conditional_code(bool v) {
  assert(v != _in_conditional_code, "must change state");
  _in_conditional_code = v;
}


//------------------------field access--------------------------------------

void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
  assert(obj.type()->tag() == objectTag, "invalid type");
  assert(cmp.type()->tag() == type->tag(), "invalid type");
  assert(val.type()->tag() == type->tag(), "invalid type");

  LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
                                            obj, offset, cmp, val);
  set_result(x, result);
}

// Comment copied from templateTable_i486.cpp
// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs in
// order.  Store buffers on most chips allow reads & writes to reorder; the
// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
// memory barrier (i.e., it's not sufficient that the interpreter does not
// reorder volatile references, the hardware also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt each other.
// ALSO reads & writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that happen after
// the read float up to before the read.  It's OK for non-volatile memory refs
// that happen before the volatile read to float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
// that happen BEFORE the write float down to after the write.  It's OK for
// non-volatile memory refs that happen after the volatile write to float up
// before it.
//
// We only put in barriers around volatile refs (they are expensive), not
// _between_ memory refs (that would require us to track the flavor of the
// previous memory refs).  Requirements (2) and (3) require some barriers
// before volatile stores and after volatile loads.  These nearly cover
// requirement (1) but miss the volatile-store-volatile-load case.  This final
// case is placed after volatile-stores although it could just as well go
// before volatile-loads.
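//
// As a rough sketch (the exact barriers are platform-specific), the
// resulting shape is:
//
//   volatile store:  [release barrier]  store  [store-load barrier]
//   volatile load:   load  [acquire barrier]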


void LIRGenerator::do_StoreField(StoreField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();

  CodeEmitInfo* info = nullptr;
  if (needs_patching) {
    assert(x->explicit_null_check() == nullptr, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == nullptr) {
      info = state_for(x);
    } else {
      info = state_for(nc);
    }
  }

  LIRItem object(x->obj(), this);
  LIRItem value(x->value(),  this);

  object.load_item();

  if (is_volatile || needs_patching) {
    // load item if field is volatile (fewer special cases for volatiles)
    // load item if field not initialized
    // load item if field not constant
    // because of code patching we cannot inline constants
    if (field_type == T_BYTE || field_type == T_BOOLEAN) {
      value.load_byte_item();
    } else {
      value.load_item();
    }
  } else {
    value.load_for_store(field_type);
  }

  set_no_result(x);

#ifndef PRODUCT
  if (PrintNotLoaded && needs_patching) {
    tty->print_cr("   ###class not loaded at store_%s bci %d",
                  x->is_static() ?  "static" : "field", x->printable_bci());
  }
#endif

  if (!inline_type_field_access_prolog(x)) {
    // Field store will always deopt due to unloaded field or holder klass
    return;
  }

  if (x->needs_null_check() &&
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()))) {
    // Emit an explicit null check because the offset is too large.
    // If the class is not loaded and the object is null, we need to deoptimize to throw a
    // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
    __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
  }

  DecoratorSet decorators = IN_HEAP;
  if (is_volatile) {
    decorators |= MO_SEQ_CST;
  }
  if (needs_patching) {
    decorators |= C1_NEEDS_PATCHING;
  }

  access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
                  value.result(), info != nullptr ? new CodeEmitInfo(info) : nullptr, info);
}

// FIXME -- I can't find any other way to pass an address to access_load_at().
class TempResolvedAddress: public Instruction {
 public:
  TempResolvedAddress(ValueType* type, LIR_Opr addr) : Instruction(type) {
    set_operand(addr);
  }
  virtual void input_values_do(ValueVisitor*) {}
  virtual void visit(InstructionVisitor* v)   {}
  virtual const char* name() const  { return "TempResolvedAddress"; }
};
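
// TempResolvedAddress wraps an already-computed element address so that it
// can be passed to access_load_at()/access_store_at() as the "base" item,
// with the remaining field offset supplied separately. For example,
// access_sub_element() below wraps the address returned by
// get_and_load_element_address() this way.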

LIR_Opr LIRGenerator::get_and_load_element_address(LIRItem& array, LIRItem& index) {
  ciType* array_type = array.value()->declared_type();
  ciFlatArrayKlass* flat_array_klass = array_type->as_flat_array_klass();
  assert(flat_array_klass->is_loaded(), "must be");

  int array_header_size = flat_array_klass->array_header_in_bytes();
  int shift = flat_array_klass->log2_element_size();

#ifndef _LP64
  LIR_Opr index_op = new_register(T_INT);
  // FIXME -- on 32-bit, the shift below can overflow, so we need to check that
  // the top (shift+1) bits of index_op are zero, or
  // else throw ArrayIndexOutOfBoundsException
  if (index.result()->is_constant()) {
    jint const_index = index.result()->as_jint();
    __ move(LIR_OprFact::intConst(const_index << shift), index_op);
  } else {
    __ shift_left(index.result(), shift, index_op);
  }
#else
  LIR_Opr index_op = new_register(T_LONG);
  if (index.result()->is_constant()) {
    jint const_index = index.result()->as_jint();
    __ move(LIR_OprFact::longConst(const_index << shift), index_op);
  } else {
    __ convert(Bytecodes::_i2l, index.result(), index_op);
    // Need to shift manually, as LIR_Address can scale only up to 3.
    __ shift_left(index_op, shift, index_op);
  }
#endif

  LIR_Opr elm_op = new_pointer_register();
  LIR_Address* elm_address = generate_address(array.result(), index_op, 0, array_header_size, T_ADDRESS);
  __ leal(LIR_OprFact::address(elm_address), elm_op);
  return elm_op;
}

void LIRGenerator::access_sub_element(LIRItem& array, LIRItem& index, LIR_Opr& result, ciField* field, int sub_offset) {
  assert(field != nullptr, "Need a subelement type specified");

  // Find the starting address of the source (inside the array)
  LIR_Opr elm_op = get_and_load_element_address(array, index);

  BasicType subelt_type = field->type()->basic_type();
  TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(subelt_type), elm_op);
  LIRItem elm_item(elm_resolved_addr, this);

  DecoratorSet decorators = IN_HEAP;
  access_load_at(decorators, subelt_type,
                 elm_item, LIR_OprFact::intConst(sub_offset), result,
                 nullptr, nullptr);

  if (field->is_null_free()) {
    assert(field->type()->is_loaded(), "Must be");
    assert(field->type()->is_inlinetype(), "Must be if loaded");
    assert(field->type()->as_inline_klass()->is_initialized(), "Must be");
    LabelObj* L_end = new LabelObj();
    __ cmp(lir_cond_notEqual, result, LIR_OprFact::oopConst(nullptr));
    __ branch(lir_cond_notEqual, L_end->label());
    set_in_conditional_code(true);
    Constant* default_value = new Constant(new InstanceConstant(field->type()->as_inline_klass()->default_instance()));
    if (default_value->is_pinned()) {
      __ move(LIR_OprFact::value_type(default_value->type()), result);
    } else {
      __ move(load_constant(default_value), result);
    }
    __ branch_destination(L_end->label());
    set_in_conditional_code(false);
  }
}

void LIRGenerator::access_flat_array(bool is_load, LIRItem& array, LIRItem& index, LIRItem& obj_item,
                                     ciField* field, int sub_offset) {
  assert(sub_offset == 0 || field != nullptr, "Sanity check");

  // Find the starting address of the source (inside the array)
  LIR_Opr elm_op = get_and_load_element_address(array, index);

  ciInlineKlass* elem_klass = nullptr;
  if (field != nullptr) {
    elem_klass = field->type()->as_inline_klass();
  } else {
    elem_klass = array.value()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass();
  }
  for (int i = 0; i < elem_klass->nof_nonstatic_fields(); i++) {
    ciField* inner_field = elem_klass->nonstatic_field_at(i);
    assert(!inner_field->is_flat(), "flat fields must have been expanded");
    int obj_offset = inner_field->offset_in_bytes();
    int elm_offset = obj_offset - elem_klass->first_field_offset() + sub_offset; // object header is not stored in array.
    BasicType field_type = inner_field->type()->basic_type();

    // Types which are smaller than int are still passed in an int register.
    BasicType reg_type = field_type;
    switch (reg_type) {
    case T_BYTE:
    case T_BOOLEAN:
    case T_SHORT:
    case T_CHAR:
      reg_type = T_INT;
      break;
    default:
      break;
    }

    LIR_Opr temp = new_register(reg_type);
    TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(field_type), elm_op);
    LIRItem elm_item(elm_resolved_addr, this);

    DecoratorSet decorators = IN_HEAP;
    if (is_load) {
      access_load_at(decorators, field_type,
                     elm_item, LIR_OprFact::intConst(elm_offset), temp,
                     nullptr, nullptr);
      access_store_at(decorators, field_type,
                      obj_item, LIR_OprFact::intConst(obj_offset), temp,
                      nullptr, nullptr);
    } else {
      access_load_at(decorators, field_type,
                     obj_item, LIR_OprFact::intConst(obj_offset), temp,
                     nullptr, nullptr);
      access_store_at(decorators, field_type,
                      elm_item, LIR_OprFact::intConst(elm_offset), temp,
                      nullptr, nullptr);
    }
  }
}

void LIRGenerator::check_flat_array(LIR_Opr array, LIR_Opr value, CodeStub* slow_path) {
  LIR_Opr tmp = new_register(T_METADATA);
  __ check_flat_array(array, value, tmp, slow_path);
}

void LIRGenerator::check_null_free_array(LIRItem& array, LIRItem& value, CodeEmitInfo* info) {
  LabelObj* L_end = new LabelObj();
  LIR_Opr tmp = new_register(T_METADATA);
  __ check_null_free_array(array.result(), tmp);
  __ branch(lir_cond_equal, L_end->label());
  __ null_check(value.result(), info);
  __ branch_destination(L_end->label());
}

bool LIRGenerator::needs_flat_array_store_check(StoreIndexed* x) {
  if (x->elt_type() == T_OBJECT && x->array()->maybe_flat_array()) {
    ciType* type = x->value()->declared_type();
    if (type != nullptr && type->is_klass()) {
      ciKlass* klass = type->as_klass();
      if (!klass->can_be_inline_klass() || (klass->is_inlinetype() && !klass->as_inline_klass()->flat_array())) {
        // This is known to be a non-flat object. If the array is a flat array,
        // it will be caught by the code generated by array_store_check().
        return false;
      }
    }
    // We're not 100% sure, so let's do the flat_array_store_check.
    return true;
  }
  return false;
}

bool LIRGenerator::needs_null_free_array_store_check(StoreIndexed* x) {
  return x->elt_type() == T_OBJECT && x->array()->maybe_null_free_array();
}

void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(), "");
  assert(x->elt_type() != T_ARRAY, "never used");
  bool is_loaded_flat_array = x->array()->is_loaded_flat_array();
  bool needs_range_check = x->compute_needs_range_check();
  bool use_length = x->length() != nullptr;
  bool obj_store = is_reference_type(x->elt_type());
  bool needs_store_check = obj_store && !(is_loaded_flat_array && x->is_exact_flat_array_store()) &&
                                        (x->value()->as_Constant() == nullptr ||
                                         !get_jobject_constant(x->value())->is_null_object());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();
  }

  if (needs_store_check || x->check_boolean()
      || is_loaded_flat_array || needs_flat_array_store_check(x) || needs_null_free_array_store_check(x)) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // the CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different
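  // (For example, range_check_info and null_check_info below must be
  // distinct CodeEmitInfo objects even though both describe the same bci;
  // null_check_info is therefore allocated as a copy of range_check_info.)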
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = nullptr;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  if (needs_range_check) {
    if (use_length) {
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = nullptr;
    }
  }

  if (x->should_profile()) {
    if (x->array()->is_loaded_flat_array()) {
      // No need to profile a store to a flat array of known type. This can happen if
      // the type only became known after optimizations (for example, after the PhiSimplifier).
      x->set_should_profile(false);
    } else {
      ciMethodData* md = nullptr;
      ciArrayLoadStoreData* load_store = nullptr;
      profile_array_type(x, md, load_store);
      if (x->array()->maybe_null_free_array()) {
        profile_null_free_array(array, md, load_store);
      }
      profile_element_type(x->value(), md, load_store);
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    array_store_check(value.result(), array.result(), store_check_info, nullptr, -1);
  }

  if (is_loaded_flat_array) {
    if (!x->value()->is_null_free()) {
      __ null_check(value.result(), new CodeEmitInfo(range_check_info));
    }
    // If the array element is an empty inline type, no need to copy anything
    if (!x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass()->is_empty()) {
      access_flat_array(false, array, index, value);
    }
  } else {
    StoreFlattenedArrayStub* slow_path = nullptr;

    if (needs_flat_array_store_check(x)) {
      // Check if we indeed have a flat array
      index.load_item();
      slow_path = new StoreFlattenedArrayStub(array.result(), index.result(), value.result(), state_for(x, x->state_before()));
      check_flat_array(array.result(), value.result(), slow_path);
      set_in_conditional_code(true);
    } else if (needs_null_free_array_store_check(x)) {
      CodeEmitInfo* info = new CodeEmitInfo(range_check_info);
      check_null_free_array(array, value, info);
    }

    DecoratorSet decorators = IN_HEAP | IS_ARRAY;
    if (x->check_boolean()) {
      decorators |= C1_MASK_BOOLEAN;
    }

    access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
                    nullptr, null_check_info);
    if (slow_path != nullptr) {
      __ branch_destination(slow_path->continuation());
      set_in_conditional_code(false);
    }
  }
}

void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
                                  LIRItem& base, LIR_Opr offset, LIR_Opr result,
                                  CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
  decorators |= ACCESS_READ;
  LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
  if (access.is_raw()) {
    _barrier_set->BarrierSetC1::load_at(access, result);
  } else {
    _barrier_set->load_at(access, result);
  }
}

void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
                               LIR_Opr addr, LIR_Opr result) {
  decorators |= ACCESS_READ;
  LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
  access.set_resolved_addr(addr);
  if (access.is_raw()) {
    _barrier_set->BarrierSetC1::load(access, result);
  } else {
    _barrier_set->load(access, result);
  }
}

void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type,
                                   LIRItem& base, LIR_Opr offset, LIR_Opr value,
                                   CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
  decorators |= ACCESS_WRITE;
  LIRAccess access(this, decorators, base, offset, type, patch_info, store_emit_info);
  if (access.is_raw()) {
    _barrier_set->BarrierSetC1::store_at(access, value);
  } else {
    _barrier_set->store_at(access, value);
  }
}

LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
                                               LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
  decorators |= ACCESS_READ;
  decorators |= ACCESS_WRITE;
  // Atomic operations are SEQ_CST by default
  decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
  LIRAccess access(this, decorators, base, offset, type);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC1::atomic_cmpxchg_at(access, cmp_value, new_value);
  } else {
    return _barrier_set->atomic_cmpxchg_at(access, cmp_value, new_value);
  }
}

LIR_Opr LIRGenerator::access_atomic_xchg_at(DecoratorSet decorators, BasicType type,
                                            LIRItem& base, LIRItem& offset, LIRItem& value) {
  decorators |= ACCESS_READ;
  decorators |= ACCESS_WRITE;
  // Atomic operations are SEQ_CST by default
  decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
  LIRAccess access(this, decorators, base, offset, type);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC1::atomic_xchg_at(access, value);
  } else {
    return _barrier_set->atomic_xchg_at(access, value);
  }
}

LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type,
                                           LIRItem& base, LIRItem& offset, LIRItem& value) {
  decorators |= ACCESS_READ;
  decorators |= ACCESS_WRITE;
  // Atomic operations are SEQ_CST by default
  decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
  LIRAccess access(this, decorators, base, offset, type);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC1::atomic_add_at(access, value);
  } else {
    return _barrier_set->atomic_add_at(access, value);
  }
}

bool LIRGenerator::inline_type_field_access_prolog(AccessField* x) {
  ciField* field = x->field();
  assert(!field->is_flat(), "Flattened field access should have been expanded");
  if (!field->is_null_free()) {
    return true; // Not an inline type field
  }
  // Deoptimize if the access is non-static and requires patching (holder not loaded
  // or not accessible) because then we only have partial field information and the
  // field could be flat (see ciField constructor).
  bool could_be_flat = !x->is_static() && x->needs_patching();
  // Deoptimize if we load from a static field with an uninitialized type because we
  // need to throw an exception if initialization of the type failed.
  bool not_initialized = x->is_static() && x->as_LoadField() != nullptr &&
      !field->type()->as_instance_klass()->is_initialized();
  if (could_be_flat || not_initialized) {
    CodeEmitInfo* info = state_for(x, x->state_before());
    CodeStub* stub = new DeoptimizeStub(new CodeEmitInfo(info),
                                        Deoptimization::Reason_unloaded,
                                        Deoptimization::Action_make_not_entrant);
    __ jump(stub);
    return false;
  }
  return true;
}

void LIRGenerator::do_LoadField(LoadField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();

  CodeEmitInfo* info = nullptr;
  if (needs_patching) {
    assert(x->explicit_null_check() == nullptr, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == nullptr) {
      info = state_for(x);
    } else {
      info = state_for(nc);
    }
  }

  LIRItem object(x->obj(), this);

  object.load_item();

#ifndef PRODUCT
  if (PrintNotLoaded && needs_patching) {
    tty->print_cr("   ###class not loaded at load_%s bci %d",
                  x->is_static() ?  "static" : "field", x->printable_bci());
  }
#endif

  if (!inline_type_field_access_prolog(x)) {
    // Field load will always deopt due to unloaded field or holder klass
    LIR_Opr result = rlock_result(x, field_type);
    __ move(LIR_OprFact::oopConst(nullptr), result);
    return;
  }

  bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
  if (x->needs_null_check() &&
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()) ||
       stress_deopt)) {
    LIR_Opr obj = object.result();
    if (stress_deopt) {
      obj = new_register(T_OBJECT);
      __ move(LIR_OprFact::oopConst(nullptr), obj);
    }
    // Emit an explicit null check because the offset is too large.
    // If the class is not loaded and the object is null, we need to deoptimize to throw a
    // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
    __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
  }

  DecoratorSet decorators = IN_HEAP;
  if (is_volatile) {
    decorators |= MO_SEQ_CST;
  }
  if (needs_patching) {
    decorators |= C1_NEEDS_PATCHING;
  }

  LIR_Opr result = rlock_result(x, field_type);
  access_load_at(decorators, field_type,
                 object, LIR_OprFact::intConst(x->offset()), result,
                 info ? new CodeEmitInfo(info) : nullptr, info);

  ciField* field = x->field();
  if (field->is_null_free()) {
    // A load from a non-flat inline type field requires a null check
    // to replace null with the default value.
    ciInstanceKlass* holder = field->holder();
    if (field->is_static() && holder->is_loaded()) {
      ciObject* val = holder->java_mirror()->field_value(field).as_object();
      if (!val->is_null_object()) {
        // Static field is initialized, we don't need to perform a null check.
        return;
      }
    }
    ciInlineKlass* inline_klass = field->type()->as_inline_klass();
    if (inline_klass->is_initialized()) {
      LabelObj* L_end = new LabelObj();
      __ cmp(lir_cond_notEqual, result, LIR_OprFact::oopConst(nullptr));
      __ branch(lir_cond_notEqual, L_end->label());
      set_in_conditional_code(true);
      Constant* default_value = new Constant(new InstanceConstant(inline_klass->default_instance()));
      if (default_value->is_pinned()) {
        __ move(LIR_OprFact::value_type(default_value->type()), result);
      } else {
        __ move(load_constant(default_value), result);
      }
      __ branch_destination(L_end->label());
      set_in_conditional_code(false);
    } else {
      info = state_for(x, x->state_before());
      __ cmp(lir_cond_equal, result, LIR_OprFact::oopConst(nullptr));
      __ branch(lir_cond_equal, new DeoptimizeStub(info, Deoptimization::Reason_uninitialized,
                                                         Deoptimization::Action_make_not_entrant));
    }
  }
}

// int/long jdk.internal.util.Preconditions.checkIndex
void LIRGenerator::do_PreconditionsCheckIndex(Intrinsic* x, BasicType type) {
  assert(x->number_of_arguments() == 3, "wrong type");
  LIRItem index(x->argument_at(0), this);
  LIRItem length(x->argument_at(1), this);
  LIRItem oobef(x->argument_at(2), this);

  index.load_item();
  length.load_item();
  oobef.load_item();

  LIR_Opr result = rlock_result(x);
  // x->state() is created from copy_state_for_exception; it does not contain the
  // arguments, so we must push them here in case deoptimization re-enters the interpreter.
  ValueStack* state = x->state();
  for (int i = 0; i < x->number_of_arguments(); i++) {
    Value arg = x->argument_at(i);
    state->push(arg->type(), arg);
  }
  CodeEmitInfo* info = state_for(x, state);

  LIR_Opr len = length.result();
  LIR_Opr zero;
  if (type == T_INT) {
    zero = LIR_OprFact::intConst(0);
    if (length.result()->is_constant()) {
      len = LIR_OprFact::intConst(length.result()->as_jint());
    }
  } else {
    assert(type == T_LONG, "sanity check");
    zero = LIR_OprFact::longConst(0);
    if (length.result()->is_constant()) {
      len = LIR_OprFact::longConst(length.result()->as_jlong());
    }
  }
  // C1 cannot compare an index against a constant value when the condition is
  // neither lir_cond_equal nor lir_cond_notEqual (see LIR_Assembler::comp_op).
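  // For example, cmp(lir_cond_less, index, zero) with a constant zero would
  // hit that unimplemented path, so the constant bound is first materialized
  // into a register (zero_reg below).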
  LIR_Opr zero_reg = new_register(type);
  __ move(zero, zero_reg);
#if defined(X86) && !defined(_LP64)
  // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
  LIR_Opr index_copy = new_register(index.type());
  // index >= 0
  __ move(index.result(), index_copy);
  __ cmp(lir_cond_less, index_copy, zero_reg);
  __ branch(lir_cond_less, new DeoptimizeStub(info, Deoptimization::Reason_range_check,
                                                    Deoptimization::Action_make_not_entrant));
  // index < length
  __ move(index.result(), index_copy);
  __ cmp(lir_cond_greaterEqual, index_copy, len);
  __ branch(lir_cond_greaterEqual, new DeoptimizeStub(info, Deoptimization::Reason_range_check,
                                                            Deoptimization::Action_make_not_entrant));
#else
  // index >= 0
  __ cmp(lir_cond_less, index.result(), zero_reg);
  __ branch(lir_cond_less, new DeoptimizeStub(info, Deoptimization::Reason_range_check,
                                                    Deoptimization::Action_make_not_entrant));
  // index < length
  __ cmp(lir_cond_greaterEqual, index.result(), len);
  __ branch(lir_cond_greaterEqual, new DeoptimizeStub(info, Deoptimization::Reason_range_check,
                                                            Deoptimization::Action_make_not_entrant));
#endif
  __ move(index.result(), result);
}

//------------------------array access--------------------------------------


void LIRGenerator::do_ArrayLength(ArrayLength* x) {
  LIRItem array(x->array(), this);
  array.load_item();
  LIR_Opr reg = rlock_result(x);

  CodeEmitInfo* info = nullptr;
  if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == nullptr) {
      info = state_for(x);
    } else {
      info = state_for(nc);
    }
    if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
      LIR_Opr obj = new_register(T_OBJECT);
      __ move(LIR_OprFact::oopConst(nullptr), obj);
      __ null_check(obj, new CodeEmitInfo(info));
    }
  }
  __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
}


void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
  bool use_length = x->length() != nullptr;
  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem length(this);
  bool needs_range_check = x->compute_needs_range_check();

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();
  }

  array.load_item();
  if (index.is_constant() && can_inline_as_constant(x->index())) {
    // let it be a constant
    index.dont_load_item();
  } else {
    index.load_item();
  }

  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = nullptr;
  if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc != nullptr) {
      null_check_info = state_for(nc);
    } else {
      null_check_info = range_check_info;
    }
    if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
      LIR_Opr obj = new_register(T_OBJECT);
      __ move(LIR_OprFact::oopConst(nullptr), obj);
      __ null_check(obj, new CodeEmitInfo(null_check_info));
    }
  }

  if (needs_range_check) {
    if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
      __ branch(lir_cond_always, new RangeCheckStub(range_check_info, index.result(), array.result()));
    } else if (use_length) {
      // TODO: use a (modified) version of array_range_check that does not require a
      //       constant length to be loaded to a register
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // The range check performs the null check, so clear it out for the load
      null_check_info = nullptr;
    }
  }

  ciMethodData* md = nullptr;
  ciArrayLoadStoreData* load_store = nullptr;
  if (x->should_profile()) {
    if (x->array()->is_loaded_flat_array()) {
      // No need to profile a load from a flat array of known type. This can happen if
      // the type only became known after optimizations (for example, after the PhiSimplifier).
      x->set_should_profile(false);
    } else {
      profile_array_type(x, md, load_store);
    }
  }

  Value element;
  if (x->vt() != nullptr) {
    assert(x->array()->is_loaded_flat_array(), "must be");
    // Find the destination address (of the NewInlineTypeInstance).
    LIRItem obj_item(x->vt(), this);

    access_flat_array(true, array, index, obj_item,
                      x->delayed() == nullptr ? nullptr : x->delayed()->field(),
                      x->delayed() == nullptr ? 0 : x->delayed()->offset());
    set_no_result(x);
  } else if (x->delayed() != nullptr) {
    assert(x->array()->is_loaded_flat_array(), "must be");
    LIR_Opr result = rlock_result(x, x->delayed()->field()->type()->basic_type());
    access_sub_element(array, index, result, x->delayed()->field(), x->delayed()->offset());
  } else if (x->array() != nullptr && x->array()->is_loaded_flat_array() &&
             x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass()->is_initialized() &&
             x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass()->is_empty()) {
    // Load the default instance instead of reading the element
    ciInlineKlass* elem_klass = x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass();
    LIR_Opr result = rlock_result(x, x->elt_type());
    assert(elem_klass->is_initialized(), "Must be");
    Constant* default_value = new Constant(new InstanceConstant(elem_klass->default_instance()));
    if (default_value->is_pinned()) {
      __ move(LIR_OprFact::value_type(default_value->type()), result);
    } else {
      __ move(load_constant(default_value), result);
    }
  } else {
    LIR_Opr result = rlock_result(x, x->elt_type());
    LoadFlattenedArrayStub* slow_path = nullptr;

    if (x->should_profile() && x->array()->maybe_null_free_array()) {
      profile_null_free_array(array, md, load_store);
    }

    if (x->elt_type() == T_OBJECT && x->array()->maybe_flat_array()) {
      assert(x->delayed() == nullptr, "Delayed LoadIndexed only applies to loaded flat arrays");
      index.load_item();
      // if we are loading from a flat array, load it using a runtime call
      slow_path = new LoadFlattenedArrayStub(array.result(), index.result(), result, state_for(x, x->state_before()));
      check_flat_array(array.result(), LIR_OprFact::illegalOpr, slow_path);
      set_in_conditional_code(true);
    }

    DecoratorSet decorators = IN_HEAP | IS_ARRAY;
    access_load_at(decorators, x->elt_type(),
                   array, index.result(), result,
                   nullptr, null_check_info);

    if (slow_path != nullptr) {
      __ branch_destination(slow_path->continuation());
      set_in_conditional_code(false);
    }

    element = x;
  }

  if (x->should_profile()) {
    profile_element_type(element, md, load_store);
  }
}

void LIRGenerator::do_Deoptimize(Deoptimize* x) {
  // This happens only when a class X uses the withfield/aconst_init bytecode
  // to refer to an inline class V, where V has not yet been loaded/resolved.
  // This is not a common case. Let's just deoptimize.
  CodeEmitInfo* info = state_for(x, x->state_before());
  CodeStub* stub = new DeoptimizeStub(new CodeEmitInfo(info),
                                      Deoptimization::Reason_unloaded,
                                      Deoptimization::Action_make_not_entrant);
  __ jump(stub);
  LIR_Opr reg = rlock_result(x, T_OBJECT);
  __ move(LIR_OprFact::oopConst(nullptr), reg);
}

void LIRGenerator::do_NullCheck(NullCheck* x) {
  if (x->can_trap()) {
    LIRItem value(x->obj(), this);
    value.load_item();
    CodeEmitInfo* info = state_for(x);
    __ null_check(value.result(), info);
  }
}


void LIRGenerator::do_TypeCast(TypeCast* x) {
  LIRItem value(x->obj(), this);
  value.load_item();
  // the result is the same as from the node we are casting
  set_result(x, value.result());
}


void LIRGenerator::do_Throw(Throw* x) {
  LIRItem exception(x->exception(), this);
  exception.load_item();
  set_no_result(x);
  LIR_Opr exception_opr = exception.result();
  CodeEmitInfo* info = state_for(x, x->state());

#ifndef PRODUCT
  if (PrintC1Statistics) {
    increment_counter(Runtime1::throw_count_address(), T_INT);
  }
#endif

  // check if the instruction has an xhandler in any of the nested scopes
  bool unwind = false;
  if (info->exception_handlers()->length() == 0) {
    // this throw is not inside an xhandler
    unwind = true;
  } else {
    // get some idea of the throw type
    bool type_is_exact = true;
    ciType* throw_type = x->exception()->exact_type();
    if (throw_type == nullptr) {
      type_is_exact = false;
      throw_type = x->exception()->declared_type();
    }
    if (throw_type != nullptr && throw_type->is_instance_klass()) {
      ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
      unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
    }
  }

  // do null check before moving exception oop into fixed register
  // to avoid a fixed interval with an oop during the null check.
  // Use a copy of the CodeEmitInfo because debug information is
  // different for null_check and throw.
  if (x->exception()->as_NewInstance() == nullptr && x->exception()->as_ExceptionObject() == nullptr) {
    // if the exception object wasn't created using new then it might be null.
    __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
  }

  if (compilation()->env()->jvmti_can_post_on_exceptions()) {
    // we need to go through the exception lookup path to get JVMTI
    // notification done
    unwind = false;
  }

  // move exception oop into fixed register
  __ move(exception_opr, exceptionOopOpr());

  if (unwind) {
    __ unwind_exception(exceptionOopOpr());
  } else {
    __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
  }
}


void LIRGenerator::do_RoundFP(RoundFP* x) {
  assert(strict_fp_requires_explicit_rounding, "not required");

  LIRItem input(x->input(), this);
  input.load_item();
  LIR_Opr input_opr = input.result();
  assert(input_opr->is_register(), "why round if value is not in a register?");
  assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be a floating-point value");
  if (input_opr->is_single_fpu()) {
    set_result(x, round_item(input_opr)); // This code path is not currently taken
  } else {
    LIR_Opr result = new_register(T_DOUBLE);
    set_vreg_flag(result, must_start_in_memory);
    __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
    set_result(x, result);
  }
}


void LIRGenerator::do_UnsafeGet(UnsafeGet* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);

  off.load_item();
  src.load_item();

  DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS;

  if (x->is_volatile()) {
    decorators |= MO_SEQ_CST;
  }
  if (type == T_BOOLEAN) {
    decorators |= C1_MASK_BOOLEAN;
  }
  if (is_reference_type(type)) {
    decorators |= ON_UNKNOWN_OOP_REF;
  }

  LIR_Opr result = rlock_result(x, type);
  if (!x->is_raw()) {
    access_load_at(decorators, type, src, off.result(), result);
  } else {
    // Currently it is only used in GraphBuilder::setup_osr_entry_block.
    // It reads the value from [src + offset] directly.
#ifdef _LP64
    LIR_Opr offset = new_register(T_LONG);
    __ convert(Bytecodes::_i2l, off.result(), offset);
#else
    LIR_Opr offset = off.result();
#endif
    LIR_Address* addr = new LIR_Address(src.result(), offset, type);
    if (is_reference_type(type)) {
      __ move_wide(addr, result);
    } else {
      __ move(addr, result);
    }
  }
}


void LIRGenerator::do_UnsafePut(UnsafePut* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem data(x->value(), this);

  src.load_item();
  if (type == T_BOOLEAN || type == T_BYTE) {
    data.load_byte_item();
  } else {
    data.load_item();
  }
  off.load_item();

  set_no_result(x);

  DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS;
  if (is_reference_type(type)) {
    decorators |= ON_UNKNOWN_OOP_REF;
  }
  if (x->is_volatile()) {
    decorators |= MO_SEQ_CST;
  }
  access_store_at(decorators, type, src, off.result(), data.result());
}

void LIRGenerator::do_UnsafeGetAndSet(UnsafeGetAndSet* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem value(x->value(), this);

  DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS | MO_SEQ_CST;

  if (is_reference_type(type)) {
    decorators |= ON_UNKNOWN_OOP_REF;
  }

  LIR_Opr result;
  if (x->is_add()) {
    result = access_atomic_add_at(decorators, type, src, off, value);
  } else {
    result = access_atomic_xchg_at(decorators, type, src, off, value);
  }
  set_result(x, result);
}

void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
  int lng = x->length();

  for (int i = 0; i < lng; i++) {
    C1SwitchRange* one_range = x->at(i);
    int low_key = one_range->low_key();
    int high_key = one_range->high_key();
    BlockBegin* dest = one_range->sux();
    if (low_key == high_key) {
      __ cmp(lir_cond_equal, value, low_key);
      __ branch(lir_cond_equal, dest);
    } else if (high_key - low_key == 1) {
      __ cmp(lir_cond_equal, value, low_key);
      __ branch(lir_cond_equal, dest);
      __ cmp(lir_cond_equal, value, high_key);
      __ branch(lir_cond_equal, dest);
    } else {
      LabelObj* L = new LabelObj();
      __ cmp(lir_cond_less, value, low_key);
      __ branch(lir_cond_less, L->label());
      __ cmp(lir_cond_lessEqual, value, high_key);
      __ branch(lir_cond_lessEqual, dest);
      __ branch_destination(L->label());
    }
  }
  __ jump(default_sux);
}


SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
  SwitchRangeList* res = new SwitchRangeList();
  int len = x->length();
  if (len > 0) {
    BlockBegin* sux = x->sux_at(0);
    int low = x->lo_key();
    BlockBegin* default_sux = x->default_sux();
    C1SwitchRange* range = new C1SwitchRange(low, sux);
    for (int i = 0; i < len; i++) {
      int key = low + i;
      BlockBegin* new_sux = x->sux_at(i);
      if (sux == new_sux) {
        // still in same range
        range->set_high_key(key);
      } else {
        // skip tests which explicitly dispatch to the default
        if (sux != default_sux) {
          res->append(range);
        }
        range = new C1SwitchRange(key, new_sux);
      }
      sux = new_sux;
    }
    if (res->length() == 0 || res->last() != range)  res->append(range);
  }
  return res;
}


// we expect the keys to be sorted by increasing value
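//
// For example, keys {0, 1, 2, 5} with successors {A, A, A, B} collapse into
// the ranges [0, 2] -> A and [5, 5] -> B (a range whose successor is the
// default is skipped).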
2671 SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2672   SwitchRangeList* res = new SwitchRangeList();
2673   int len = x->length();
2674   if (len > 0) {
2675     BlockBegin* default_sux = x->default_sux();
2676     int key = x->key_at(0);
2677     BlockBegin* sux = x->sux_at(0);
2678     C1SwitchRange* range = new C1SwitchRange(key, sux);
2679     for (int i = 1; i < len; i++) {
2680       int new_key = x->key_at(i);
2681       BlockBegin* new_sux = x->sux_at(i);
2682       if (key+1 == new_key && sux == new_sux) {
2683         // still in same range
2684         range->set_high_key(new_key);
2685       } else {
2686         // skip tests which explicitly dispatch to the default
2687         if (range->sux() != default_sux) {
2688           res->append(range);
2689         }
2690         range = new C1SwitchRange(new_key, new_sux);
2691       }
2692       key = new_key;
2693       sux = new_sux;
2694     }
2695     if (res->length() == 0 || res->last() != range)  res->append(range);
2696   }
2697   return res;
2698 }
2699 
2700 
2701 void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2702   LIRItem tag(x->tag(), this);
2703   tag.load_item();
2704   set_no_result(x);
2705 
2706   if (x->is_safepoint()) {
2707     __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2708   }
2709 
2710   // move values into phi locations
2711   move_to_phi(x->state());
2712 
2713   int lo_key = x->lo_key();
2714   int len = x->length();
2715   assert(lo_key <= (lo_key + (len - 1)), "integer overflow");
2716   LIR_Opr value = tag.result();
2717 
2718   if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) {
2719     ciMethod* method = x->state()->scope()->method();
2720     ciMethodData* md = method->method_data_or_null();
2721     assert(md != nullptr, "Sanity");
2722     ciProfileData* data = md->bci_to_data(x->state()->bci());
2723     assert(data != nullptr, "must have profiling data");
2724     assert(data->is_MultiBranchData(), "bad profile data?");
2725     int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset());
2726     LIR_Opr md_reg = new_register(T_METADATA);
2727     __ metadata2reg(md->constant_encoding(), md_reg);
2728     LIR_Opr data_offset_reg = new_pointer_register();
2729     LIR_Opr tmp_reg = new_pointer_register();
2730 
2731     __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg);
2732     for (int i = 0; i < len; i++) {
2733       int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i));
2734       __ cmp(lir_cond_equal, value, i + lo_key);
2735       __ move(data_offset_reg, tmp_reg);
2736       __ cmove(lir_cond_equal,
2737                LIR_OprFact::intptrConst(count_offset),
2738                tmp_reg,
2739                data_offset_reg, T_INT);
2740     }
2741 
2742     LIR_Opr data_reg = new_pointer_register();
2743     LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
2744     __ move(data_addr, data_reg);
2745     __ add(data_reg, LIR_OprFact::intptrConst(1), data_reg);
2746     __ move(data_reg, data_addr);
2747   }
2748 
2749   if (UseTableRanges) {
2750     do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2751   } else {
2752     for (int i = 0; i < len; i++) {
2753       __ cmp(lir_cond_equal, value, i + lo_key);
2754       __ branch(lir_cond_equal, x->sux_at(i));
2755     }
2756     __ jump(x->default_sux());
2757   }
2758 }
2759 
2760 
2761 void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2762   LIRItem tag(x->tag(), this);
2763   tag.load_item();
2764   set_no_result(x);
2765 
2766   if (x->is_safepoint()) {
2767     __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2768   }
2769 
2770   // move values into phi locations
2771   move_to_phi(x->state());
2772 
2773   LIR_Opr value = tag.result();
2774   int len = x->length();
2775 
2776   if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) {
2777     ciMethod* method = x->state()->scope()->method();
2778     ciMethodData* md = method->method_data_or_null();
2779     assert(md != nullptr, "Sanity");
2780     ciProfileData* data = md->bci_to_data(x->state()->bci());
2781     assert(data != nullptr, "must have profiling data");
2782     assert(data->is_MultiBranchData(), "bad profile data?");
2783     int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset());
2784     LIR_Opr md_reg = new_register(T_METADATA);
2785     __ metadata2reg(md->constant_encoding(), md_reg);
2786     LIR_Opr data_offset_reg = new_pointer_register();
2787     LIR_Opr tmp_reg = new_pointer_register();
2788 
2789     __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg);
2790     for (int i = 0; i < len; i++) {
2791       int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i));
2792       __ cmp(lir_cond_equal, value, x->key_at(i));
2793       __ move(data_offset_reg, tmp_reg);
2794       __ cmove(lir_cond_equal,
2795                LIR_OprFact::intptrConst(count_offset),
2796                tmp_reg,
2797                data_offset_reg, T_INT);
2798     }
2799 
2800     LIR_Opr data_reg = new_pointer_register();
2801     LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
2802     __ move(data_addr, data_reg);
2803     __ add(data_reg, LIR_OprFact::intptrConst(1), data_reg);
2804     __ move(data_reg, data_addr);
2805   }
2806 
2807   if (UseTableRanges) {
2808     do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2809   } else {
2811     for (int i = 0; i < len; i++) {
2812       __ cmp(lir_cond_equal, value, x->key_at(i));
2813       __ branch(lir_cond_equal, x->sux_at(i));
2814     }
2815     __ jump(x->default_sux());
2816   }
2817 }
2818 
2819 
2820 void LIRGenerator::do_Goto(Goto* x) {
2821   set_no_result(x);
2822 
2823   if (block()->next()->as_OsrEntry()) {
2824     // need to free up storage used for OSR entry point
2825     LIR_Opr osrBuffer = block()->next()->operand();
2826     BasicTypeList signature;
2827     signature.append(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); // pass a pointer to osrBuffer
2828     CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2829     __ move(osrBuffer, cc->args()->at(0));
2830     __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
2831                          getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2832   }
2833 
2834   if (x->is_safepoint()) {
2835     ValueStack* state = x->state_before() ? x->state_before() : x->state();
2836 
2837     // increment backedge counter if needed
2838     CodeEmitInfo* info = state_for(x, state);
2839     increment_backedge_counter(info, x->profiled_bci());
2840     CodeEmitInfo* safepoint_info = state_for(x, state);
2841     __ safepoint(safepoint_poll_register(), safepoint_info);
2842   }
2843 
  // A Goto can be a folded If; handle profiling for this case.
2845   if (x->should_profile()) {
2846     ciMethod* method = x->profiled_method();
2847     assert(method != nullptr, "method should be set if branch is profiled");
2848     ciMethodData* md = method->method_data_or_null();
2849     assert(md != nullptr, "Sanity");
2850     ciProfileData* data = md->bci_to_data(x->profiled_bci());
2851     assert(data != nullptr, "must have profiling data");
2852     int offset;
2853     if (x->direction() == Goto::taken) {
2854       assert(data->is_BranchData(), "need BranchData for two-way branches");
2855       offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
2856     } else if (x->direction() == Goto::not_taken) {
2857       assert(data->is_BranchData(), "need BranchData for two-way branches");
2858       offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
2859     } else {
2860       assert(data->is_JumpData(), "need JumpData for branches");
2861       offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
2862     }
2863     LIR_Opr md_reg = new_register(T_METADATA);
2864     __ metadata2reg(md->constant_encoding(), md_reg);
2865 
2866     increment_counter(new LIR_Address(md_reg, offset,
2867                                       NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
2868   }
2869 
  // Emit the phi-instruction moves after the safepoint since this simplifies
  // describing the state at the safepoint.
2872   move_to_phi(x->state());
2873 
2874   __ jump(x->default_sux());
2875 }
2876 
2877 /**
2878  * Emit profiling code if needed for arguments, parameters, return value types
2879  *
2880  * @param md                    MDO the code will update at runtime
2881  * @param md_base_offset        common offset in the MDO for this profile and subsequent ones
2882  * @param md_offset             offset in the MDO (on top of md_base_offset) for this profile
2883  * @param profiled_k            current profile
2884  * @param obj                   IR node for the object to be profiled
 * @param mdp                   register to hold the pointer inside the MDO (md + md_base_offset).
 *                              Set once we find an update to make; reused for subsequent updates.
 * @param not_null              true if we know obj cannot be null
 * @param signature_at_call_k   signature at the call for obj
 * @param callee_signature_k    signature of the callee for obj; the call-site and callee
 *                              signatures differ at method handle calls
2891  * @return                      the only klass we know will ever be seen at this profile point
2892  */
2893 ciKlass* LIRGenerator::profile_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k,
2894                                     Value obj, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
2895                                     ciKlass* callee_signature_k) {
2896   ciKlass* result = nullptr;
2897   bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
2898   bool do_update = !TypeEntries::is_type_unknown(profiled_k);
  // The value is known not to be null (or the null bit is already set) and the
  // type is already marked unknown: nothing we can do to improve this profile.
2901   if (!do_null && !do_update) {
2902     return result;
2903   }
2904 
2905   ciKlass* exact_klass = nullptr;
2906   Compilation* comp = Compilation::current();
2907   if (do_update) {
2908     // try to find exact type, using CHA if possible, so that loading
2909     // the klass from the object can be avoided
2910     ciType* type = obj->exact_type();
2911     if (type == nullptr) {
2912       type = obj->declared_type();
2913       type = comp->cha_exact_type(type);
2914     }
2915     assert(type == nullptr || type->is_klass(), "type should be class");
2916     exact_klass = (type != nullptr && type->is_loaded()) ? (ciKlass*)type : nullptr;
2917 
2918     do_update = exact_klass == nullptr || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2919   }
2920 
2921   if (!do_null && !do_update) {
2922     return result;
2923   }
2924 
2925   ciKlass* exact_signature_k = nullptr;
2926   if (do_update && signature_at_call_k != nullptr) {
2927     // Is the type from the signature exact (the only one possible)?
2928     exact_signature_k = signature_at_call_k->exact_klass();
2929     if (exact_signature_k == nullptr) {
2930       exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2931     } else {
2932       result = exact_signature_k;
2933       // Known statically. No need to emit any code: prevent
2934       // LIR_Assembler::emit_profile_type() from emitting useless code
2935       profiled_k = ciTypeEntries::with_status(result, profiled_k);
2936     }
2937     // exact_klass and exact_signature_k can be both non null but
2938     // different if exact_klass is loaded after the ciObject for
2939     // exact_signature_k is created.
2940     if (exact_klass == nullptr && exact_signature_k != nullptr && exact_klass != exact_signature_k) {
2941       // sometimes the type of the signature is better than the best type
2942       // the compiler has
2943       exact_klass = exact_signature_k;
2944     }
2945     if (callee_signature_k != nullptr &&
2946         callee_signature_k != signature_at_call_k) {
2947       ciKlass* improved_klass = callee_signature_k->exact_klass();
2948       if (improved_klass == nullptr) {
2949         improved_klass = comp->cha_exact_type(callee_signature_k);
2950       }
2951       if (exact_klass == nullptr && improved_klass != nullptr && exact_klass != improved_klass) {
        exact_klass = improved_klass;
2953       }
2954     }
2955     do_update = exact_klass == nullptr || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2956   }
2957 
2958   if (!do_null && !do_update) {
2959     return result;
2960   }
2961 
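  // Materialize the MDO pointer (biased by md_base_offset) lazily, the first
  // time an update is actually emitted; callers reuse it for later profiles.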
2962   if (mdp == LIR_OprFact::illegalOpr) {
2963     mdp = new_register(T_METADATA);
2964     __ metadata2reg(md->constant_encoding(), mdp);
2965     if (md_base_offset != 0) {
2966       LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
2967       mdp = new_pointer_register();
2968       __ leal(LIR_OprFact::address(base_type_address), mdp);
2969     }
2970   }
2971   LIRItem value(obj, this);
2972   value.load_item();
2973   __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
2974                   value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != nullptr);
2975   return result;
2976 }
2977 
2978 // profile parameters on entry to the root of the compilation
2979 void LIRGenerator::profile_parameters(Base* x) {
2980   if (compilation()->profile_parameters()) {
2981     CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2982     ciMethodData* md = scope()->method()->method_data_or_null();
2983     assert(md != nullptr, "Sanity");
2984 
2985     if (md->parameters_type_data() != nullptr) {
2986       ciParametersTypeData* parameters_type_data = md->parameters_type_data();
2987       ciTypeStackSlotEntries* parameters =  parameters_type_data->parameters();
2988       LIR_Opr mdp = LIR_OprFact::illegalOpr;
2989       for (int java_index = 0, i = 0, j = 0; j < parameters_type_data->number_of_parameters(); i++) {
2990         LIR_Opr src = args->at(i);
2991         assert(!src->is_illegal(), "check");
2992         BasicType t = src->type();
2993         if (is_reference_type(t)) {
2994           intptr_t profiled_k = parameters->type(j);
2995           Local* local = x->state()->local_at(java_index)->as_Local();
2996           ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
2997                                         in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
2998                                         profiled_k, local, mdp, false, local->declared_type()->as_klass(), nullptr);
2999           // If the profile is known statically set it once for all and do not emit any code
3000           if (exact != nullptr) {
3001             md->set_parameter_type(j, exact);
3002           }
3003           j++;
3004         }
3005         java_index += type2size[t];
3006       }
3007     }
3008   }
3009 }
3010 
3011 void LIRGenerator::profile_flags(ciMethodData* md, ciProfileData* data, int flag, LIR_Condition condition) {
3012   assert(md != nullptr && data != nullptr, "should have been initialized");
3013   LIR_Opr mdp = new_register(T_METADATA);
3014   __ metadata2reg(md->constant_encoding(), mdp);
3015   LIR_Address* addr = new LIR_Address(mdp, md->byte_offset_of_slot(data, DataLayout::flags_offset()), T_BYTE);
3016   LIR_Opr flags = new_register(T_INT);
3017   __ move(addr, flags);
  if (condition != lir_cond_always) {
    LIR_Opr update = new_register(T_INT);
    __ cmove(condition, LIR_OprFact::intConst(flag), LIR_OprFact::intConst(0), update, T_INT);
    __ logical_or(flags, update, flags);
  } else {
    __ logical_or(flags, LIR_OprFact::intConst(flag), flags);
  }
3024   __ store(flags, addr);
3025 }
3026 
3027 void LIRGenerator::profile_null_free_array(LIRItem array, ciMethodData* md, ciArrayLoadStoreData* load_store) {
3028   assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
  LIR_Opr tmp = new_register(T_METADATA);
  __ check_null_free_array(array.result(), tmp);
3032 
3033   profile_flags(md, load_store, ArrayLoadStoreData::null_free_array_byte_constant(), lir_cond_equal);
3034 }
3035 
3036 void LIRGenerator::profile_array_type(AccessIndexed* x, ciMethodData*& md, ciArrayLoadStoreData*& load_store) {
3037   assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
3038   int bci = x->profiled_bci();
3039   md = x->profiled_method()->method_data();
3040   assert(md != nullptr, "Sanity");
3041   ciProfileData* data = md->bci_to_data(bci);
3042   assert(data != nullptr && data->is_ArrayLoadStoreData(), "incorrect profiling entry");
3043   load_store = (ciArrayLoadStoreData*)data;
3044   LIR_Opr mdp = LIR_OprFact::illegalOpr;
3045   profile_type(md, md->byte_offset_of_slot(load_store, ArrayLoadStoreData::array_offset()), 0,
3046                load_store->array()->type(), x->array(), mdp, true, nullptr, nullptr);
3047 }
3048 
3049 void LIRGenerator::profile_element_type(Value element, ciMethodData* md, ciArrayLoadStoreData* load_store) {
3050   assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
3051   assert(md != nullptr && load_store != nullptr, "should have been initialized");
3052   LIR_Opr mdp = LIR_OprFact::illegalOpr;
3053   profile_type(md, md->byte_offset_of_slot(load_store, ArrayLoadStoreData::element_offset()), 0,
3054                load_store->element()->type(), element, mdp, false, nullptr, nullptr);
3055 }
3056 
3057 void LIRGenerator::do_Base(Base* x) {
3058   __ std_entry(LIR_OprFact::illegalOpr);
3059   // Emit moves from physical registers / stack slots to virtual registers
3060   CallingConvention* args = compilation()->frame_map()->incoming_arguments();
3061   IRScope* irScope = compilation()->hir()->top_scope();
3062   int java_index = 0;
3063   for (int i = 0; i < args->length(); i++) {
3064     LIR_Opr src = args->at(i);
3065     assert(!src->is_illegal(), "check");
3066     BasicType t = src->type();
3067 
    // Types smaller than int are passed as int, so
    // correct the type that was passed.
3070     switch (t) {
3071     case T_BYTE:
3072     case T_BOOLEAN:
3073     case T_SHORT:
3074     case T_CHAR:
3075       t = T_INT;
3076       break;
3077     default:
3078       break;
3079     }
3080 
3081     LIR_Opr dest = new_register(t);
3082     __ move(src, dest);
3083 
3084     // Assign new location to Local instruction for this local
3085     Local* local = x->state()->local_at(java_index)->as_Local();
3086     assert(local != nullptr, "Locals for incoming arguments must have been created");
3087 #ifndef __SOFTFP__
3088     // The java calling convention passes double as long and float as int.
3089     assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
3090 #endif // __SOFTFP__
3091     local->set_operand(dest);
3092     _instruction_for_operand.at_put_grow(dest->vreg_number(), local, nullptr);
3093     java_index += type2size[t];
3094   }
3095 
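  // Notify dtrace method-entry probes, passing the current thread and the Method*.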
3096   if (compilation()->env()->dtrace_method_probes()) {
3097     BasicTypeList signature;
3098     signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
3099     signature.append(T_METADATA); // Method*
3100     LIR_OprList* args = new LIR_OprList();
3101     args->append(getThreadPointer());
3102     LIR_Opr meth = new_register(T_METADATA);
3103     __ metadata2reg(method()->constant_encoding(), meth);
3104     args->append(meth);
3105     call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, nullptr);
3106   }
3107 
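  // For synchronized methods, emit the monitor enter on the receiver, or on the
  // holder's java mirror for static methods.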
3108   if (method()->is_synchronized()) {
3109     LIR_Opr obj;
3110     if (method()->is_static()) {
3111       obj = new_register(T_OBJECT);
3112       __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
3113     } else {
3114       Local* receiver = x->state()->local_at(0)->as_Local();
3115       assert(receiver != nullptr, "must already exist");
3116       obj = receiver->operand();
3117     }
3118     assert(obj->is_valid(), "must be valid");
3119 
    if (GenerateSynchronizationCode) {
3121       LIR_Opr lock = syncLockOpr();
3122       __ load_stack_address_monitor(0, lock);
3123 
3124       CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, x->check_flag(Instruction::DeoptimizeOnException));
3125       CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
3126 
3127       // receiver is guaranteed non-null so don't need CodeEmitInfo
3128       __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, nullptr);
3129     }
3130   }
3131   // increment invocation counters if needed
3132   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
3133     profile_parameters(x);
3134     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, false);
3135     increment_invocation_counter(info);
3136   }
3137   if (method()->has_scalarized_args()) {
3138     // Check if deoptimization was triggered (i.e. orig_pc was set) while buffering scalarized inline type arguments
3139     // in the entry point (see comments in frame::deoptimize). If so, deoptimize only now that we have the right state.
3140     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), nullptr, false);
3141     CodeStub* deopt_stub = new DeoptimizeStub(info, Deoptimization::Reason_none, Deoptimization::Action_none);
3142     __ append(new LIR_Op0(lir_check_orig_pc));
3143     __ branch(lir_cond_notEqual, deopt_stub);
3144   }
3145 
3146   // all blocks with a successor must end with an unconditional jump
3147   // to the successor even if they are consecutive
3148   __ jump(x->default_sux());
3149 }
3150 
3151 
3152 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
3153   // construct our frame and model the production of incoming pointer
3154   // to the OSR buffer.
3155   __ osr_entry(LIR_Assembler::osrBufferPointer());
3156   LIR_Opr result = rlock_result(x);
3157   __ move(LIR_Assembler::osrBufferPointer(), result);
3158 }
3159 
3160 void LIRGenerator::invoke_load_one_argument(LIRItem* param, LIR_Opr loc) {
3161   if (loc->is_register()) {
3162     param->load_item_force(loc);
3163   } else {
3164     LIR_Address* addr = loc->as_address_ptr();
3165     param->load_for_store(addr->type());
3166     assert(addr->type() != T_PRIMITIVE_OBJECT, "not supported yet");
3167     if (addr->type() == T_OBJECT) {
3168       __ move_wide(param->result(), addr);
3169     } else {
3170       __ move(param->result(), addr);
3171     }
3172   }
3173 }
3174 
3175 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
3176   assert(args->length() == arg_list->length(),
3177          "args=%d, arg_list=%d", args->length(), arg_list->length());
3178   for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
3179     LIRItem* param = args->at(i);
3180     LIR_Opr loc = arg_list->at(i);
3181     invoke_load_one_argument(param, loc);
3182   }
3183 
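  // The receiver (argument 0) was skipped above and is loaded separately into
  // its fixed location.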
3184   if (x->has_receiver()) {
3185     LIRItem* receiver = args->at(0);
3186     LIR_Opr loc = arg_list->at(0);
3187     if (loc->is_register()) {
3188       receiver->load_item_force(loc);
3189     } else {
3190       assert(loc->is_address(), "just checking");
3191       receiver->load_for_store(T_OBJECT);
3192       __ move_wide(receiver->result(), loc->as_address_ptr());
3193     }
3194   }
3195 }
3196 
3197 
3198 // Visits all arguments, returns appropriate items without loading them
3199 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
3200   LIRItemList* argument_items = new LIRItemList();
3201   if (x->has_receiver()) {
3202     LIRItem* receiver = new LIRItem(x->receiver(), this);
3203     argument_items->append(receiver);
3204   }
3205   for (int i = 0; i < x->number_of_arguments(); i++) {
3206     LIRItem* param = new LIRItem(x->argument_at(i), this);
3207     argument_items->append(param);
3208   }
3209   return argument_items;
3210 }
3211 
3212 
// The invoke with receiver has the following phases:
//   a) traverse and load/lock the receiver;
//   b) traverse all arguments -> item-array (invoke_visit_arguments)
//   c) push the receiver on the stack
//   d) load each of the items and push it on the stack
//   e) unlock the receiver
//   f) move the receiver into the receiver-register %o0
//   g) lock result registers and emit the call operation
//
// Before issuing a call, we must spill-save all values on the stack
// that are in caller-save registers. "spill-save" moves those values
// either into a free callee-save register or spills them if no free
// callee-save register is available.
//
// The problem is where to invoke spill-save.
// - if invoked between e) and f), we may lock a callee-save
//   register in "spill-save" that destroys the receiver register
//   before f) is executed
// - if we rearrange f) to be earlier (by loading %o0) it
//   may destroy a value on the stack that is currently in %o0
//   and is waiting to be spilled
// - if we keep the receiver locked while doing spill-save,
//   we cannot spill it as it is spill-locked
3236 //
3237 void LIRGenerator::do_Invoke(Invoke* x) {
3238   CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
3239 
3240   LIR_OprList* arg_list = cc->args();
3241   LIRItemList* args = invoke_visit_arguments(x);
3242   LIR_Opr receiver = LIR_OprFact::illegalOpr;
3243 
3244   // setup result register
3245   LIR_Opr result_register = LIR_OprFact::illegalOpr;
3246   if (x->type() != voidType) {
3247     result_register = result_register_for(x->type());
3248   }
3249 
3250   CodeEmitInfo* info = state_for(x, x->state());
3251 
3252   invoke_load_arguments(x, args, arg_list);
3253 
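  // Force the receiver into the platform's dedicated receiver register, where
  // the resolution and inline-cache stubs expect to find it.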
3254   if (x->has_receiver()) {
3255     args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
3256     receiver = args->at(0)->result();
3257   }
3258 
3259   // emit invoke code
3260   assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
3261 
3262   // JSR 292
3263   // Preserve the SP over MethodHandle call sites, if needed.
3264   ciMethod* target = x->target();
3265   bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
3266                                   target->is_method_handle_intrinsic() ||
3267                                   target->is_compiled_lambda_form());
3268   if (is_method_handle_invoke) {
3269     info->set_is_method_handle_invoke(true);
    if (FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
      __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
    }
3273   }
3274 
3275   switch (x->code()) {
3276     case Bytecodes::_invokestatic:
3277       __ call_static(target, result_register,
3278                      SharedRuntime::get_resolve_static_call_stub(),
3279                      arg_list, info);
3280       break;
3281     case Bytecodes::_invokespecial:
3282     case Bytecodes::_invokevirtual:
3283     case Bytecodes::_invokeinterface:
3284       // for loaded and final (method or class) target we still produce an inline cache,
3285       // in order to be able to call mixed mode
3286       if (x->code() == Bytecodes::_invokespecial || x->target_is_final()) {
3287         __ call_opt_virtual(target, receiver, result_register,
3288                             SharedRuntime::get_resolve_opt_virtual_call_stub(),
3289                             arg_list, info);
3290       } else {
3291         __ call_icvirtual(target, receiver, result_register,
3292                           SharedRuntime::get_resolve_virtual_call_stub(),
3293                           arg_list, info);
3294       }
3295       break;
3296     case Bytecodes::_invokedynamic: {
3297       __ call_dynamic(target, receiver, result_register,
3298                       SharedRuntime::get_resolve_static_call_stub(),
3299                       arg_list, info);
3300       break;
3301     }
3302     default:
3303       fatal("unexpected bytecode: %s", Bytecodes::name(x->code()));
3304       break;
3305   }
3306 
3307   // JSR 292
3308   // Restore the SP after MethodHandle call sites, if needed.
3309   if (is_method_handle_invoke
3310       && FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
3311     __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
3312   }
3313 
3314   if (result_register->is_valid()) {
3315     LIR_Opr result = rlock_result(x);
3316     __ move(result_register, result);
3317   }
3318 }
3319 
3320 
3321 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
3322   assert(x->number_of_arguments() == 1, "wrong type");
3323   LIRItem value       (x->argument_at(0), this);
3324   LIR_Opr reg = rlock_result(x);
3325   value.load_item();
3326   LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
3327   __ move(tmp, reg);
3328 }


// Code for:  x->x() {x->cond()} x->y() ? x->tval() : x->fval()
3333 void LIRGenerator::do_IfOp(IfOp* x) {
3334 #ifdef ASSERT
3335   {
3336     ValueTag xtag = x->x()->type()->tag();
3337     ValueTag ttag = x->tval()->type()->tag();
3338     assert(xtag == intTag || xtag == objectTag, "cannot handle others");
3339     assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
3340     assert(ttag == x->fval()->type()->tag(), "cannot handle others");
3341   }
3342 #endif
3343 
3344   LIRItem left(x->x(), this);
3345   LIRItem right(x->y(), this);
3346   left.load_item();
3347   if (can_inline_as_constant(right.value()) && !x->substitutability_check()) {
3348     right.dont_load_item();
3349   } else {
3350     // substitutability_check() needs to use right as a base register.
3351     right.load_item();
3352   }
3353 
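  // Neither branch value needs to be pre-loaded; both are consumed directly as
  // cmove (or substitutability-check) operands.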
3354   LIRItem t_val(x->tval(), this);
3355   LIRItem f_val(x->fval(), this);
3356   t_val.dont_load_item();
3357   f_val.dont_load_item();
3358 
3359   if (x->substitutability_check()) {
3360     substitutability_check(x, left, right, t_val, f_val);
3361   } else {
3362     LIR_Opr reg = rlock_result(x);
3363     __ cmp(lir_cond(x->cond()), left.result(), right.result());
3364     __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
3365   }
3366 }
3367 
3368 void LIRGenerator::substitutability_check(IfOp* x, LIRItem& left, LIRItem& right, LIRItem& t_val, LIRItem& f_val) {
3369   assert(x->cond() == If::eql || x->cond() == If::neq, "must be");
3370   bool is_acmpeq = (x->cond() == If::eql);
3371   LIR_Opr equal_result     = is_acmpeq ? t_val.result() : f_val.result();
3372   LIR_Opr not_equal_result = is_acmpeq ? f_val.result() : t_val.result();
3373   LIR_Opr result = rlock_result(x);
3374   CodeEmitInfo* info = state_for(x, x->state_before());
3375 
3376   substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info);
3377 }
3378 
3379 void LIRGenerator::substitutability_check(If* x, LIRItem& left, LIRItem& right) {
3380   LIR_Opr equal_result     = LIR_OprFact::intConst(1);
3381   LIR_Opr not_equal_result = LIR_OprFact::intConst(0);
3382   LIR_Opr result = new_register(T_INT);
3383   CodeEmitInfo* info = state_for(x, x->state_before());
3384 
3385   substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info);
3386 
3387   assert(x->cond() == If::eql || x->cond() == If::neq, "must be");
3388   __ cmp(lir_cond(x->cond()), result, equal_result);
3389 }
3390 
3391 void LIRGenerator::substitutability_check_common(Value left_val, Value right_val, LIRItem& left, LIRItem& right,
3392                                                  LIR_Opr equal_result, LIR_Opr not_equal_result, LIR_Opr result,
3393                                                  CodeEmitInfo* info) {
3394   LIR_Opr tmp1 = LIR_OprFact::illegalOpr;
3395   LIR_Opr tmp2 = LIR_OprFact::illegalOpr;
3396   LIR_Opr left_klass_op = LIR_OprFact::illegalOpr;
3397   LIR_Opr right_klass_op = LIR_OprFact::illegalOpr;
3398 
3399   ciKlass* left_klass  = left_val ->as_loaded_klass_or_null();
3400   ciKlass* right_klass = right_val->as_loaded_klass_or_null();
3401 
  if ((left_klass == nullptr || right_klass == nullptr) || // The klass is still unloaded, or came from a Phi node.
      !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
3404     init_temps_for_substitutability_check(tmp1, tmp2);
3405   }
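  // If both operands are statically known to share the same inline klass, the
  // klass loads can be skipped; otherwise reserve registers for loading them.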
3406 
3407   if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) {
3408     // No need to load klass -- the operands are statically known to be the same inline klass.
3409   } else {
3410     BasicType t_klass = UseCompressedOops ? T_INT : T_METADATA;
3411     left_klass_op = new_register(t_klass);
3412     right_klass_op = new_register(t_klass);
3413   }
3414 
3415   CodeStub* slow_path = new SubstitutabilityCheckStub(left.result(), right.result(), info);
3416   __ substitutability_check(result, left.result(), right.result(), equal_result, not_equal_result,
3417                             tmp1, tmp2,
3418                             left_klass, right_klass, left_klass_op, right_klass_op, info, slow_path);
3419 }
3420 
3421 void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {
3422   assert(x->number_of_arguments() == 0, "wrong type");
3423   // Enforce computation of _reserved_argument_area_size which is required on some platforms.
3424   BasicTypeList signature;
3425   CallingConvention* cc = frame_map()->c_calling_convention(&signature);
3426   LIR_Opr reg = result_register_for(x->type());
3427   __ call_runtime_leaf(routine, getThreadTemp(),
3428                        reg, new LIR_OprList());
3429   LIR_Opr result = rlock_result(x);
3430   __ move(reg, result);
3431 }
3432 
3433 
3434 
3435 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
3436   switch (x->id()) {
3437   case vmIntrinsics::_intBitsToFloat      :
3438   case vmIntrinsics::_doubleToRawLongBits :
3439   case vmIntrinsics::_longBitsToDouble    :
3440   case vmIntrinsics::_floatToRawIntBits   : {
3441     do_FPIntrinsics(x);
3442     break;
3443   }
3444 
3445 #ifdef JFR_HAVE_INTRINSICS
3446   case vmIntrinsics::_counterTime:
3447     do_RuntimeCall(CAST_FROM_FN_PTR(address, JfrTime::time_function()), x);
3448     break;
3449 #endif
3450 
3451   case vmIntrinsics::_currentTimeMillis:
3452     do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), x);
3453     break;
3454 
3455   case vmIntrinsics::_nanoTime:
3456     do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), x);
3457     break;
3458 
3459   case vmIntrinsics::_Object_init:    do_RegisterFinalizer(x); break;
3460   case vmIntrinsics::_isInstance:     do_isInstance(x);    break;
3461   case vmIntrinsics::_isPrimitive:    do_isPrimitive(x);   break;
3462   case vmIntrinsics::_getModifiers:   do_getModifiers(x);  break;
3463   case vmIntrinsics::_getClass:       do_getClass(x);      break;
3464   case vmIntrinsics::_getObjectSize:  do_getObjectSize(x); break;
3465   case vmIntrinsics::_currentCarrierThread: do_currentCarrierThread(x); break;
3466   case vmIntrinsics::_currentThread:  do_vthread(x);       break;
3467   case vmIntrinsics::_scopedValueCache: do_scopedValueCache(x); break;
3468 
3469   case vmIntrinsics::_dlog:           // fall through
3470   case vmIntrinsics::_dlog10:         // fall through
3471   case vmIntrinsics::_dabs:           // fall through
3472   case vmIntrinsics::_dsqrt:          // fall through
3473   case vmIntrinsics::_dsqrt_strict:   // fall through
3474   case vmIntrinsics::_dtan:           // fall through
3475   case vmIntrinsics::_dsin :          // fall through
3476   case vmIntrinsics::_dcos :          // fall through
3477   case vmIntrinsics::_dexp :          // fall through
3478   case vmIntrinsics::_dpow :          do_MathIntrinsic(x); break;
3479   case vmIntrinsics::_arraycopy:      do_ArrayCopy(x);     break;
3480 
3481   case vmIntrinsics::_fmaD:           do_FmaIntrinsic(x); break;
3482   case vmIntrinsics::_fmaF:           do_FmaIntrinsic(x); break;
3483 
3484   // Use java.lang.Math intrinsics code since it works for these intrinsics too.
3485   case vmIntrinsics::_floatToFloat16: // fall through
3486   case vmIntrinsics::_float16ToFloat: do_MathIntrinsic(x); break;
3487 
3488   case vmIntrinsics::_Preconditions_checkIndex:
3489     do_PreconditionsCheckIndex(x, T_INT);
3490     break;
3491   case vmIntrinsics::_Preconditions_checkLongIndex:
3492     do_PreconditionsCheckIndex(x, T_LONG);
3493     break;
3494 
3495   case vmIntrinsics::_compareAndSetReference:
3496     do_CompareAndSwap(x, objectType);
3497     break;
3498   case vmIntrinsics::_compareAndSetInt:
3499     do_CompareAndSwap(x, intType);
3500     break;
3501   case vmIntrinsics::_compareAndSetLong:
3502     do_CompareAndSwap(x, longType);
3503     break;
3504 
3505   case vmIntrinsics::_loadFence :
3506     __ membar_acquire();
3507     break;
3508   case vmIntrinsics::_storeFence:
3509     __ membar_release();
3510     break;
3511   case vmIntrinsics::_storeStoreFence:
3512     __ membar_storestore();
3513     break;
3514   case vmIntrinsics::_fullFence :
3515     __ membar();
3516     break;
3517   case vmIntrinsics::_onSpinWait:
3518     __ on_spin_wait();
3519     break;
3520   case vmIntrinsics::_Reference_get:
3521     do_Reference_get(x);
3522     break;
3523 
3524   case vmIntrinsics::_updateCRC32:
3525   case vmIntrinsics::_updateBytesCRC32:
3526   case vmIntrinsics::_updateByteBufferCRC32:
3527     do_update_CRC32(x);
3528     break;
3529 
3530   case vmIntrinsics::_updateBytesCRC32C:
3531   case vmIntrinsics::_updateDirectByteBufferCRC32C:
3532     do_update_CRC32C(x);
3533     break;
3534 
3535   case vmIntrinsics::_vectorizedMismatch:
3536     do_vectorizedMismatch(x);
3537     break;
3538 
3539   case vmIntrinsics::_blackhole:
3540     do_blackhole(x);
3541     break;
3542 
3543   default: ShouldNotReachHere(); break;
3544   }
3545 }
3546 
3547 void LIRGenerator::profile_arguments(ProfileCall* x) {
3548   if (compilation()->profile_arguments()) {
3549     int bci = x->bci_of_invoke();
3550     ciMethodData* md = x->method()->method_data_or_null();
3551     assert(md != nullptr, "Sanity");
3552     ciProfileData* data = md->bci_to_data(bci);
3553     if (data != nullptr) {
3554       if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) ||
3555           (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) {
3556         ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
3557         int base_offset = md->byte_offset_of_slot(data, extra);
3558         LIR_Opr mdp = LIR_OprFact::illegalOpr;
3559         ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();
3560 
3561         Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3562         int start = 0;
3563         int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
3564         if (x->callee()->is_loaded() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
3565           // first argument is not profiled at call (method handle invoke)
3566           assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected");
3567           start = 1;
3568         }
3569         ciSignature* callee_signature = x->callee()->signature();
3570         // method handle call to virtual method
3571         bool has_receiver = x->callee()->is_loaded() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
3572         ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : nullptr);
3573 
3574         bool ignored_will_link;
3575         ciSignature* signature_at_call = nullptr;
3576         x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3577         ciSignatureStream signature_at_call_stream(signature_at_call);
3578 
3579         // if called through method handle invoke, some arguments may have been popped
3580         for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) {
3581           int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
3582           ciKlass* exact = profile_type(md, base_offset, off,
3583               args->type(i), x->profiled_arg_at(i+start), mdp,
3584               !x->arg_needs_null_check(i+start),
3585               signature_at_call_stream.next_klass(), callee_signature_stream.next_klass());
3586           if (exact != nullptr) {
3587             md->set_argument_type(bci, i, exact);
3588           }
3589         }
3590       } else {
3591 #ifdef ASSERT
3592         Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke());
3593         int n = x->nb_profiled_args();
3594         assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() ||
3595             (x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))),
3596             "only at JSR292 bytecodes");
3597 #endif
3598       }
3599     }
3600   }
3601 }
3602 
3603 // profile parameters on entry to an inlined method
3604 void LIRGenerator::profile_parameters_at_call(ProfileCall* x) {
3605   if (compilation()->profile_parameters() && x->inlined()) {
3606     ciMethodData* md = x->callee()->method_data_or_null();
3607     if (md != nullptr) {
3608       ciParametersTypeData* parameters_type_data = md->parameters_type_data();
3609       if (parameters_type_data != nullptr) {
3610         ciTypeStackSlotEntries* parameters =  parameters_type_data->parameters();
3611         LIR_Opr mdp = LIR_OprFact::illegalOpr;
3612         bool has_receiver = !x->callee()->is_static();
3613         ciSignature* sig = x->callee()->signature();
3614         ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : nullptr);
3615         int i = 0; // to iterate on the Instructions
3616         Value arg = x->recv();
3617         bool not_null = false;
3618         int bci = x->bci_of_invoke();
3619         Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3620         // The first parameter is the receiver so that's what we start
3621         // with if it exists. One exception is method handle call to
3622         // virtual method: the receiver is in the args list
3623         if (arg == nullptr || !Bytecodes::has_receiver(bc)) {
3624           i = 1;
3625           arg = x->profiled_arg_at(0);
3626           not_null = !x->arg_needs_null_check(0);
3627         }
3628         int k = 0; // to iterate on the profile data
3629         for (;;) {
3630           intptr_t profiled_k = parameters->type(k);
3631           ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
3632                                         in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
3633                                         profiled_k, arg, mdp, not_null, sig_stream.next_klass(), nullptr);
3634           // If the profile is known statically set it once for all and do not emit any code
3635           if (exact != nullptr) {
3636             md->set_parameter_type(k, exact);
3637           }
3638           k++;
3639           if (k >= parameters_type_data->number_of_parameters()) {
3640 #ifdef ASSERT
3641             int extra = 0;
3642             if (MethodData::profile_arguments() && TypeProfileParmsLimit != -1 &&
3643                 x->nb_profiled_args() >= TypeProfileParmsLimit &&
3644                 x->recv() != nullptr && Bytecodes::has_receiver(bc)) {
3645               extra += 1;
3646             }
3647             assert(i == x->nb_profiled_args() - extra || (TypeProfileParmsLimit != -1 && TypeProfileArgsLimit > TypeProfileParmsLimit), "unused parameters?");
3648 #endif
3649             break;
3650           }
3651           arg = x->profiled_arg_at(i);
3652           not_null = !x->arg_needs_null_check(i);
3653           i++;
3654         }
3655       }
3656     }
3657   }
3658 }
3659 
3660 void LIRGenerator::do_ProfileCall(ProfileCall* x) {
3661   // Need recv in a temporary register so it interferes with the other temporaries
3662   LIR_Opr recv = LIR_OprFact::illegalOpr;
3663   LIR_Opr mdo = new_register(T_METADATA);
  // tmp is used to hold the counters (historically required by the SPARC port)
3665   LIR_Opr tmp = new_pointer_register();
3666 
3667   if (x->nb_profiled_args() > 0) {
3668     profile_arguments(x);
3669   }
3670 
3671   // profile parameters on inlined method entry including receiver
3672   if (x->recv() != nullptr || x->nb_profiled_args() > 0) {
3673     profile_parameters_at_call(x);
3674   }
3675 
3676   if (x->recv() != nullptr) {
3677     LIRItem value(x->recv(), this);
3678     value.load_item();
3679     recv = new_register(T_OBJECT);
3680     __ move(value.result(), recv);
3681   }
3682   __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3683 }
3684 
3685 void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
3686   int bci = x->bci_of_invoke();
3687   ciMethodData* md = x->method()->method_data_or_null();
3688   assert(md != nullptr, "Sanity");
3689   ciProfileData* data = md->bci_to_data(bci);
3690   if (data != nullptr) {
3691     assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
3692     ciSingleTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
3693     LIR_Opr mdp = LIR_OprFact::illegalOpr;
3694 
3695     bool ignored_will_link;
3696     ciSignature* signature_at_call = nullptr;
3697     x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3698 
3699     // The offset within the MDO of the entry to update may be too large
3700     // to be used in load/store instructions on some platforms. So have
3701     // profile_type() compute the address of the profile in a register.
3702     ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
3703         ret->type(), x->ret(), mdp,
3704         !x->needs_null_check(),
3705         signature_at_call->return_type()->as_klass(),
3706         x->callee()->signature()->return_type()->as_klass());
3707     if (exact != nullptr) {
3708       md->set_return_type(bci, exact);
3709     }
3710   }
3711 }
3712 
3713 bool LIRGenerator::profile_inline_klass(ciMethodData* md, ciProfileData* data, Value value, int flag) {
3714   ciKlass* klass = value->as_loaded_klass_or_null();
3715   if (klass != nullptr) {
3716     if (klass->is_inlinetype()) {
3717       profile_flags(md, data, flag, lir_cond_always);
3718     } else if (klass->can_be_inline_klass()) {
3719       return false;
3720     }
3721   } else {
3722     return false;
3723   }
3724   return true;
3725 }
3726 
3727 
3728 void LIRGenerator::do_ProfileACmpTypes(ProfileACmpTypes* x) {
3729   ciMethod* method = x->method();
3730   assert(method != nullptr, "method should be set if branch is profiled");
3731   ciMethodData* md = method->method_data_or_null();
3732   assert(md != nullptr, "Sanity");
3733   ciProfileData* data = md->bci_to_data(x->bci());
3734   assert(data != nullptr, "must have profiling data");
  assert(data->is_ACmpData(), "need ACmpData for acmp profiling");
3736   ciACmpData* acmp = (ciACmpData*)data;
3737   LIR_Opr mdp = LIR_OprFact::illegalOpr;
3738   profile_type(md, md->byte_offset_of_slot(acmp, ACmpData::left_offset()), 0,
3739                acmp->left()->type(), x->left(), mdp, !x->left_maybe_null(), nullptr, nullptr);
3740   int flags_offset = md->byte_offset_of_slot(data, DataLayout::flags_offset());
3741   if (!profile_inline_klass(md, acmp, x->left(), ACmpData::left_inline_type_byte_constant())) {
3742     LIR_Opr mdp = new_register(T_METADATA);
3743     __ metadata2reg(md->constant_encoding(), mdp);
3744     LIRItem value(x->left(), this);
3745     value.load_item();
3746     __ profile_inline_type(new LIR_Address(mdp, flags_offset, T_INT), value.result(), ACmpData::left_inline_type_byte_constant(), new_register(T_INT), !x->left_maybe_null());
3747   }
3748   profile_type(md, md->byte_offset_of_slot(acmp, ACmpData::left_offset()),
3749                in_bytes(ACmpData::right_offset()) - in_bytes(ACmpData::left_offset()),
3750                acmp->right()->type(), x->right(), mdp, !x->right_maybe_null(), nullptr, nullptr);
3751   if (!profile_inline_klass(md, acmp, x->right(), ACmpData::right_inline_type_byte_constant())) {
3752     LIR_Opr mdp = new_register(T_METADATA);
3753     __ metadata2reg(md->constant_encoding(), mdp);
3754     LIRItem value(x->right(), this);
3755     value.load_item();
      __ profile_inline_type(new LIR_Address(mdp, flags_offset, T_INT), value.result(), ACmpData::right_inline_type_byte_constant(), new_register(T_INT), !x->right_maybe_null());
3757   }
3758 }
3759 
3760 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
  // We can safely ignore accessors here, since c2 will inline them anyway;
  // accessors are also always mature.
3763   if (!x->inlinee()->is_accessor()) {
3764     CodeEmitInfo* info = state_for(x, x->state(), true);
3765     // Notify the runtime very infrequently only to take care of counter overflows
3766     int freq_log = Tier23InlineeNotifyFreqLog;
3767     double scale;
3768     if (_method->has_option_value(CompileCommand::CompileThresholdScaling, scale)) {
3769       freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
3770     }
3771     increment_event_counter_impl(info, x->inlinee(), LIR_OprFact::intConst(InvocationCounter::count_increment), right_n_bits(freq_log), InvocationEntryBci, false, true);
3772   }
3773 }
3774 
3775 void LIRGenerator::increment_backedge_counter_conditionally(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info, int left_bci, int right_bci, int bci) {
3776   if (compilation()->is_profiling()) {
3777 #if defined(X86) && !defined(_LP64)
3778     // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
3779     LIR_Opr left_copy = new_register(left->type());
3780     __ move(left, left_copy);
3781     __ cmp(cond, left_copy, right);
3782 #else
3783     __ cmp(cond, left, right);
3784 #endif
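    // Bump the counter only along the edge that branches backwards, i.e. the
    // one whose target bci is below the current bci.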
3785     LIR_Opr step = new_register(T_INT);
3786     LIR_Opr plus_one = LIR_OprFact::intConst(InvocationCounter::count_increment);
3787     LIR_Opr zero = LIR_OprFact::intConst(0);
3788     __ cmove(cond,
3789         (left_bci < bci) ? plus_one : zero,
3790         (right_bci < bci) ? plus_one : zero,
3791         step, left->type());
3792     increment_backedge_counter(info, step, bci);
3793   }
3794 }
3795 
3796 
3797 void LIRGenerator::increment_event_counter(CodeEmitInfo* info, LIR_Opr step, int bci, bool backedge) {
3798   int freq_log = 0;
3799   int level = compilation()->env()->comp_level();
3800   if (level == CompLevel_limited_profile) {
3801     freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
3802   } else if (level == CompLevel_full_profile) {
3803     freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
3804   } else {
3805     ShouldNotReachHere();
3806   }
3807   // Increment the appropriate invocation/backedge counter and notify the runtime.
3808   double scale;
3809   if (_method->has_option_value(CompileCommand::CompileThresholdScaling, scale)) {
3810     freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
3811   }
3812   increment_event_counter_impl(info, info->scope()->method(), step, right_n_bits(freq_log), bci, backedge, true);
3813 }
3814 
3815 void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
3816                                                 ciMethod *method, LIR_Opr step, int frequency,
3817                                                 int bci, bool backedge, bool notify) {
3818   assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be x^2 - 1 or 0");
3819   int level = _compilation->env()->comp_level();
3820   assert(level > CompLevel_simple, "Shouldn't be here");
3821 
3822   int offset = -1;
3823   LIR_Opr counter_holder;
3824   if (level == CompLevel_limited_profile) {
3825     MethodCounters* counters_adr = method->ensure_method_counters();
3826     if (counters_adr == nullptr) {
3827       bailout("method counters allocation failed");
3828       return;
3829     }
3830     counter_holder = new_pointer_register();
3831     __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
3832     offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
3833                                  MethodCounters::invocation_counter_offset());
3834   } else if (level == CompLevel_full_profile) {
3835     counter_holder = new_register(T_METADATA);
3836     offset = in_bytes(backedge ? MethodData::backedge_counter_offset() :
3837                                  MethodData::invocation_counter_offset());
3838     ciMethodData* md = method->method_data_or_null();
3839     assert(md != nullptr, "Sanity");
3840     __ metadata2reg(md->constant_encoding(), counter_holder);
3841   } else {
3842     ShouldNotReachHere();
3843   }
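  // Load the counter, add 'step', and store it back.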
3844   LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
3845   LIR_Opr result = new_register(T_INT);
3846   __ load(counter, result);
3847   __ add(result, step, result);
3848   __ store(result, counter);
3849   if (notify && (!backedge || UseOnStackReplacement)) {
3850     LIR_Opr meth = LIR_OprFact::metadataConst(method->constant_encoding());
    // The bci for info can point to the cmp for ifs; we want the if's bci.
3852     CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
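    // With a non-zero frequency, notify the runtime only when the masked
    // counter bits are all zero, i.e. every (frequency + 1) increments.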
3853     int freq = frequency << InvocationCounter::count_shift;
3854     if (freq == 0) {
3855       if (!step->is_constant()) {
3856         __ cmp(lir_cond_notEqual, step, LIR_OprFact::intConst(0));
3857         __ branch(lir_cond_notEqual, overflow);
3858       } else {
3859         __ branch(lir_cond_always, overflow);
3860       }
3861     } else {
3862       LIR_Opr mask = load_immediate(freq, T_INT);
3863       if (!step->is_constant()) {
3864         // If step is 0, make sure the overflow check below always fails
3865         __ cmp(lir_cond_notEqual, step, LIR_OprFact::intConst(0));
3866         __ cmove(lir_cond_notEqual, result, LIR_OprFact::intConst(InvocationCounter::count_increment), result, T_INT);
3867       }
3868       __ logical_and(result, mask, result);
3869       __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
3870       __ branch(lir_cond_equal, overflow);
3871     }
3872     __ branch_destination(overflow->continuation());
3873   }
3874 }
3875 
3876 void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
3877   LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
3878   BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());
3879 
3880   if (x->pass_thread()) {
3881     signature->append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
3882     args->append(getThreadPointer());
3883   }
3884 
3885   for (int i = 0; i < x->number_of_arguments(); i++) {
3886     Value a = x->argument_at(i);
3887     LIRItem* item = new LIRItem(a, this);
3888     item->load_item();
3889     args->append(item->result());
3890     signature->append(as_BasicType(a->type()));
3891   }
3892 
3893   LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), nullptr);
3894   if (x->type() == voidType) {
3895     set_no_result(x);
3896   } else {
3897     __ move(result, rlock_result(x));
3898   }
3899 }
3900 
3901 #ifdef ASSERT
3902 void LIRGenerator::do_Assert(Assert *x) {
3903   ValueTag tag = x->x()->type()->tag();
3904   If::Condition cond = x->cond();
3905 
3906   LIRItem xitem(x->x(), this);
3907   LIRItem yitem(x->y(), this);
3908   LIRItem* xin = &xitem;
3909   LIRItem* yin = &yitem;
3910 
3911   assert(tag == intTag, "Only integer assertions are valid!");
3912 
3913   xin->load_item();
3914   yin->dont_load_item();
3915 
3916   set_no_result(x);
3917 
3918   LIR_Opr left = xin->result();
3919   LIR_Opr right = yin->result();
3920 
3921   __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
3922 }
3923 #endif
3924 
3925 void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
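  // A range-check predicate jumps to its PredicateFailedStub (deoptimizing)
  // whenever its condition evaluates to true; a null condition or
  // StressRangeCheckElimination makes the jump unconditional.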
3928   Instruction *a = x->x();
3929   Instruction *b = x->y();
3930   if (!a || StressRangeCheckElimination) {
3931     assert(!b || StressRangeCheckElimination, "B must also be null");
3932 
3933     CodeEmitInfo *info = state_for(x, x->state());
3934     CodeStub* stub = new PredicateFailedStub(info);
3935 
3936     __ jump(stub);
3937   } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
3938     int a_int = a->type()->as_IntConstant()->value();
3939     int b_int = b->type()->as_IntConstant()->value();
3940 
3941     bool ok = false;
3942 
3943     switch(x->cond()) {
3944       case Instruction::eql: ok = (a_int == b_int); break;
3945       case Instruction::neq: ok = (a_int != b_int); break;
3946       case Instruction::lss: ok = (a_int < b_int); break;
3947       case Instruction::leq: ok = (a_int <= b_int); break;
3948       case Instruction::gtr: ok = (a_int > b_int); break;
3949       case Instruction::geq: ok = (a_int >= b_int); break;
3950       case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
3951       case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
3952       default: ShouldNotReachHere();
3953     }
3954 
3955     if (ok) {
3957       CodeEmitInfo *info = state_for(x, x->state());
3958       CodeStub* stub = new PredicateFailedStub(info);
3959 
3960       __ jump(stub);
3961     }
3962   } else {
3964     ValueTag tag = x->x()->type()->tag();
3965     If::Condition cond = x->cond();
3966     LIRItem xitem(x->x(), this);
3967     LIRItem yitem(x->y(), this);
3968     LIRItem* xin = &xitem;
3969     LIRItem* yin = &yitem;
3970 
3971     assert(tag == intTag, "Only integer deoptimizations are valid!");
3972 
3973     xin->load_item();
3974     yin->dont_load_item();
3975     set_no_result(x);
3976 
3977     LIR_Opr left = xin->result();
3978     LIR_Opr right = yin->result();
3979 
3980     CodeEmitInfo *info = state_for(x, x->state());
3981     CodeStub* stub = new PredicateFailedStub(info);
3982 
3983     __ cmp(lir_cond(cond), left, right);
3984     __ branch(lir_cond(cond), stub);
3985   }
3986 }
3987 
3988 void LIRGenerator::do_blackhole(Intrinsic *x) {
3989   assert(!x->has_receiver(), "Should have been checked before: only static methods here");
3990   for (int c = 0; c < x->number_of_arguments(); c++) {
3991     // Load the argument
3992     LIRItem vitem(x->argument_at(c), this);
3993     vitem.load_item();
3994     // ...and leave it unused.
3995   }
3996 }
3997 
3998 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
3999   LIRItemList args(1);
4000   LIRItem value(arg1, this);
4001   args.append(&value);
4002   BasicTypeList signature;
4003   signature.append(as_BasicType(arg1->type()));
4004 
4005   return call_runtime(&signature, &args, entry, result_type, info);
4006 }
4007 
4008 
4009 LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
4010   LIRItemList args(2);
4011   LIRItem value1(arg1, this);
4012   LIRItem value2(arg2, this);
4013   args.append(&value1);
4014   args.append(&value2);
4015   BasicTypeList signature;
4016   signature.append(as_BasicType(arg1->type()));
4017   signature.append(as_BasicType(arg2->type()));
4018 
4019   return call_runtime(&signature, &args, entry, result_type, info);
4020 }
4021 
4022 
LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  // get a result register
  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  LIR_Opr result = LIR_OprFact::illegalOpr;
  if (result_type->tag() != voidTag) {
    result = new_register(result_type);
    phys_reg = result_register_for(result_type);
  }

  // move the arguments into the correct location
  CallingConvention* cc = frame_map()->c_calling_convention(signature);
  assert(cc->length() == args->length(), "argument mismatch");
  for (int i = 0; i < args->length(); i++) {
    LIR_Opr arg = args->at(i);
    LIR_Opr loc = cc->at(i);
    if (loc->is_register()) {
      __ move(arg, loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
      __ move(arg, addr);
    }
  }

  if (info != nullptr) {
    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  } else {
    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  }
  if (result->is_valid()) {
    __ move(phys_reg, result);
  }
  return result;
}


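// Variant taking not-yet-loaded LIRItems: register arguments are pinned
// directly into their ABI locations via load_item_force(), and stack
// arguments are loaded in a form suitable for the store to the outgoing
// argument area.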
LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  // get a result register
  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  LIR_Opr result = LIR_OprFact::illegalOpr;
  if (result_type->tag() != voidTag) {
    result = new_register(result_type);
    phys_reg = result_register_for(result_type);
  }

  // move the arguments into the correct location
  CallingConvention* cc = frame_map()->c_calling_convention(signature);
  assert(cc->length() == args->length(), "argument mismatch");
  for (int i = 0; i < args->length(); i++) {
    LIRItem* arg = args->at(i);
    LIR_Opr loc = cc->at(i);
    if (loc->is_register()) {
      arg->load_item_force(loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
      arg->load_for_store(addr->type());
      __ move(arg->result(), addr);
    }
  }

  if (info != nullptr) {
    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  } else {
    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  }
  if (result->is_valid()) {
    __ move(phys_reg, result);
  }
  return result;
}

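// Lower a MemBar node to the matching LIR memory-barrier instruction.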
void LIRGenerator::do_MemBar(MemBar* x) {
  LIR_Code code = x->code();
  switch (code) {
  case lir_membar_acquire   : __ membar_acquire(); break;
  case lir_membar_release   : __ membar_release(); break;
  case lir_membar           : __ membar(); break;
  case lir_membar_loadload  : __ membar_loadload(); break;
  case lir_membar_storestore: __ membar_storestore(); break;
  case lir_membar_loadstore : __ membar_loadstore(); break;
  case lir_membar_storeload : __ membar_storeload(); break;
  default                   : ShouldNotReachHere(); break;
  }
}

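// The JVMS requires that a value stored into a boolean array is masked to
// 0 or 1. T_BOOLEAN and T_BYTE arrays share a layout helper that differs in
// exactly one bit (Klass::layout_helper_boolean_diffbit()), so we test that
// bit at runtime and use the masked value only when the receiver really is
// a boolean array.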
LIR_Opr LIRGenerator::mask_boolean(LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
  LIR_Opr value_fixed = rlock_byte(T_BYTE);
  if (two_operand_lir_form) {
    __ move(value, value_fixed);
    __ logical_and(value_fixed, LIR_OprFact::intConst(1), value_fixed);
  } else {
    __ logical_and(value, LIR_OprFact::intConst(1), value_fixed);
  }
  LIR_Opr klass = new_register(T_METADATA);
  // The klass load implies the null check; it is not needed again afterwards.
  load_klass(array, klass, null_check_info);
  null_check_info = nullptr;
  LIR_Opr layout = new_register(T_INT);
  __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
  int diffbit = Klass::layout_helper_boolean_diffbit();
  __ logical_and(layout, LIR_OprFact::intConst(diffbit), layout);
  __ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0));
  // Use the masked value only if the diffbit is set, i.e. the array is boolean[].
  __ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE);
  value = value_fixed;
  return value;
}