src/hotspot/share/c1/c1_LIRGenerator.cpp

   1 /*
   2  * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "c1/c1_Compilation.hpp"
  26 #include "c1/c1_Defs.hpp"
  27 #include "c1/c1_FrameMap.hpp"
  28 #include "c1/c1_Instruction.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_LIRGenerator.hpp"
  31 #include "c1/c1_ValueStack.hpp"
  32 #include "ci/ciArrayKlass.hpp"
  33 #include "ci/ciInstance.hpp"
  34 #include "ci/ciObjArray.hpp"
  35 #include "ci/ciUtilities.hpp"
  36 #include "compiler/compilerDefinitions.inline.hpp"
  37 #include "compiler/compilerOracle.hpp"
  38 #include "gc/shared/barrierSet.hpp"
  39 #include "gc/shared/c1/barrierSetC1.hpp"
  40 #include "oops/klass.inline.hpp"
  41 #include "oops/methodCounters.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "runtime/stubRoutines.hpp"
  44 #include "runtime/vm_version.hpp"
  45 #include "utilities/bitMap.inline.hpp"
  46 #include "utilities/macros.hpp"
  47 #include "utilities/powerOfTwo.hpp"
  48 
  49 #ifdef ASSERT
  50 #define __ gen()->lir(__FILE__, __LINE__)->
  51 #else
  52 #define __ gen()->lir()->
  53 #endif
  54 
  55 #ifndef PATCHED_ADDR
  56 #define PATCHED_ADDR  (max_jint)
  57 #endif
  58 
  59 void PhiResolverState::reset() {
  60   _virtual_operands.clear();
  61   _other_operands.clear();

 198 }
 199 
 200 
 201 //--------------------------------------------------------------
 202 // LIRItem
 203 
 204 void LIRItem::set_result(LIR_Opr opr) {
 205   assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
 206   value()->set_operand(opr);
 207 
 208 #ifdef ASSERT
 209   if (opr->is_virtual()) {
 210     _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), nullptr);
 211   }
 212 #endif
 213 
 214   _result = opr;
 215 }
 216 
 217 void LIRItem::load_item() {
 218   if (result()->is_illegal()) {
 219     // update the item's result
 220     _result = value()->operand();
 221   }
 222   if (!result()->is_register()) {
 223     LIR_Opr reg = _gen->new_register(value()->type());
 224     __ move(result(), reg);
 225     if (result()->is_constant()) {
 226       _result = reg;
 227     } else {
 228       set_result(reg);
 229     }
 230   }
 231 }
 232 
 233 
 234 void LIRItem::load_for_store(BasicType type) {
 235   if (_gen->can_store_as_constant(value(), type)) {
 236     _result = value()->operand();
 237     if (!_result->is_constant()) {

 605     assert(right_op != result_op, "malformed");
 606     __ move(left_op, result_op);
 607     left_op = result_op;
 608   }
 609 
 610   switch(code) {
 611     case Bytecodes::_iand:
 612     case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;
 613 
 614     case Bytecodes::_ior:
 615     case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;
 616 
 617     case Bytecodes::_ixor:
 618     case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;
 619 
 620     default: ShouldNotReachHere();
 621   }
 622 }
 623 
 624 
 625 void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
 626   // for slow path, use debug info for state after successful locking
 627   CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
 628   __ load_stack_address_monitor(monitor_no, lock);
 629   // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
 630   __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
 631 }
 632 
 633 
 634 void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
 635   // setup registers
 636   LIR_Opr hdr = lock;
 637   lock = new_hdr;
 638   CodeStub* slow_path = new MonitorExitStub(lock, monitor_no);
 639   __ load_stack_address_monitor(monitor_no, lock);
 640   __ unlock_object(hdr, object, lock, scratch, slow_path);
 641 }
 642 
 643 #ifndef PRODUCT
 644 void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
 645   if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
 646     tty->print_cr("   ###class not loaded at new bci %d", new_instance->printable_bci());
 647   } else if (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_jvmci() && new_instance->is_unresolved())) {
 648     tty->print_cr("   ###class not resolved at new bci %d", new_instance->printable_bci());
 649   }
 650 }
 651 #endif
 652 
 653 void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
 654   klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
 655   // If klass is not loaded we do not know if the klass has finalizers:
 656   if (UseFastNewInstance && klass->is_loaded()
 657       && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
 658 
 659     StubId stub_id = klass->is_initialized() ? StubId::c1_fast_new_instance_id : StubId::c1_fast_new_instance_init_check_id;
 660 
 661     CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);
 662 
 663     assert(klass->is_loaded(), "must be loaded");
 664     // allocate space for instance
 665     assert(klass->size_helper() > 0, "illegal instance size");
 666     const int instance_size = align_object_size(klass->size_helper());
 667     __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
 668                        oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
 669   } else {
 670     CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, StubId::c1_new_instance_id);
 671     __ branch(lir_cond_always, slow_path);
 672     __ branch_destination(slow_path->continuation());
 673   }
 674 }
 675 
 676 
 677 static bool is_constant_zero(Instruction* inst) {
 678   IntConstant* c = inst->type()->as_IntConstant();
 679   if (c) {
 680     return (c->value() == 0);
 681   }
 682   return false;
 683 }
 684 
 685 
 686 static bool positive_constant(Instruction* inst) {
 687   IntConstant* c = inst->type()->as_IntConstant();
 688   if (c) {
 689     return (c->value() >= 0);
 690   }
 691   return false;

 743     } else if (dst_exact_type != nullptr && dst_exact_type->is_obj_array_klass()) {
 744       ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
 745       ciArrayKlass* src_type = nullptr;
 746       if (src_exact_type != nullptr && src_exact_type->is_obj_array_klass()) {
 747         src_type = (ciArrayKlass*) src_exact_type;
 748       } else if (src_declared_type != nullptr && src_declared_type->is_obj_array_klass()) {
 749         src_type = (ciArrayKlass*) src_declared_type;
 750       }
 751       if (src_type != nullptr) {
 752         if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
 753           is_exact = true;
 754           expected_type = dst_type;
 755         }
 756       }
 757     }
 758     // at least pass along a good guess
 759     if (expected_type == nullptr) expected_type = dst_exact_type;
 760     if (expected_type == nullptr) expected_type = src_declared_type;
 761     if (expected_type == nullptr) expected_type = dst_declared_type;
 762 
 763     src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
 764     dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
 765   }
 766 
 767   // if a probable array type has been identified, figure out if any
 768   // of the required checks for a fast case can be elided.
 769   int flags = LIR_OpArrayCopy::all_flags;
 770 
 771   if (!src_objarray)
 772     flags &= ~LIR_OpArrayCopy::src_objarray;
 773   if (!dst_objarray)
 774     flags &= ~LIR_OpArrayCopy::dst_objarray;
 775 
 776   if (!x->arg_needs_null_check(0))
 777     flags &= ~LIR_OpArrayCopy::src_null_check;
 778   if (!x->arg_needs_null_check(2))
 779     flags &= ~LIR_OpArrayCopy::dst_null_check;
 780 
 781 
 782   if (expected_type != nullptr) {
 783     Value length_limit = nullptr;
 784 
 785     IfOp* ifop = length->as_IfOp();
 786     if (ifop != nullptr) {
 787       // look for expressions like min(v, a.length) which end up as
 788       //   x > y ? y : x  or  x >= y ? y : x
 789       if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
 790           ifop->x() == ifop->fval() &&

1443   }
1444   return _vreg_flags.at(vreg_num, f);
1445 }
1446 
1447 
1448 // Block local constant handling.  This code is useful for keeping
1449 // unpinned constants and constants which aren't exposed in the IR in
1450 // registers.  Unpinned Constant instructions have their operands
1451 // cleared when the block is finished so that other blocks can't end
1452 // up referring to their registers.
1453 
1454 LIR_Opr LIRGenerator::load_constant(Constant* x) {
1455   assert(!x->is_pinned(), "only for unpinned constants");
1456   _unpinned_constants.append(x);
1457   return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
1458 }
1459 
1460 
1461 LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
1462   BasicType t = c->type();
1463   for (int i = 0; i < _constants.length(); i++) {
1464     LIR_Const* other = _constants.at(i);
1465     if (t == other->type()) {
1466       switch (t) {
1467       case T_INT:
1468       case T_FLOAT:
1469         if (c->as_jint_bits() != other->as_jint_bits()) continue;
1470         break;
1471       case T_LONG:
1472       case T_DOUBLE:
1473         if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
1474         if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
1475         break;
1476       case T_OBJECT:
1477         if (c->as_jobject() != other->as_jobject()) continue;
1478         break;
1479       default:
1480         break;
1481       }
1482       return _reg_for_constants.at(i);
1483     }
1484   }
1485 
1486   LIR_Opr result = new_register(t);
1487   __ move((LIR_Opr)c, result);
1488   _constants.append(c);
1489   _reg_for_constants.append(result);
1490   return result;
1491 }
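
The effect of the cache above, schematically (v7 stands for an arbitrary virtual register; this is an illustrative sketch, not emitted LIR):

    // first call:  load_constant(42) emits "move 42 -> v7" and records (42, v7)
    // second call: load_constant(42) finds the matching LIR_Const in _constants
    //              and returns v7 without emitting another move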
1492 
1493 //------------------------field access--------------------------------------
1494 
1495 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
1496   assert(x->number_of_arguments() == 4, "wrong type");
1497   LIRItem obj   (x->argument_at(0), this);  // object
1498   LIRItem offset(x->argument_at(1), this);  // offset of field
1499   LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
1500   LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
1501   assert(obj.type()->tag() == objectTag, "invalid type");
1502   assert(cmp.type()->tag() == type->tag(), "invalid type");
1503   assert(val.type()->tag() == type->tag(), "invalid type");
1504 
1505   LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
1506                                             obj, offset, cmp, val);
1507   set_result(x, result);
1508 }
1509 
1510 // Comment copied from templateTable_i486.cpp
1511 // ----------------------------------------------------------------------------
1512 // Volatile variables demand their effects be made known to all CPU's in
1513 // order.  Store buffers on most chips allow reads & writes to reorder; the
1514 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1515 // memory barrier (i.e., it's not sufficient that the interpreter does not
1516 // reorder volatile references, the hardware also must not reorder them).
1517 //
1518 // According to the new Java Memory Model (JMM):
1519 // (1) All volatiles are serialized with respect to each other.
1520 // ALSO reads & writes act as acquire & release, so:
1521 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1522 // the read float up to before the read.  It's OK for non-volatile memory refs
1523 // that happen before the volatile read to float down below it.
1524 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
1525 // that happen BEFORE the write float down to after the write.  It's OK for
1526 // non-volatile memory refs that happen after the volatile write to float up
1527 // before it.
1528 //
1529 // We only put in barriers around volatile refs (they are expensive), not
1530 // _between_ memory refs (that would require us to track the flavor of the
1531 // previous memory refs).  Requirements (2) and (3) require some barriers
1532 // before volatile stores and after volatile loads.  These nearly cover
1533 // requirement (1) but miss the volatile-store-volatile-load case.  This final
1534 // case is placed after volatile-stores although it could just as well go
1535 // before volatile-loads.
1536 
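
A schematic summary of the barrier placement these rules imply (a sketch only; the exact membars emitted vary by platform and access kind):

    // volatile load:   load x;  membar_acquire();              // rule (2)
    // volatile store:  membar_release();  store x;  membar();  // rule (3), plus the
    //                  // trailing store-load barrier covering rule (1)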
1537 
1538 void LIRGenerator::do_StoreField(StoreField* x) {
1539   bool needs_patching = x->needs_patching();
1540   bool is_volatile = x->field()->is_volatile();
1541   BasicType field_type = x->field_type();
1542 
1543   CodeEmitInfo* info = nullptr;
1544   if (needs_patching) {
1545     assert(x->explicit_null_check() == nullptr, "can't fold null check into patching field access");
1546     info = state_for(x, x->state_before());
1547   } else if (x->needs_null_check()) {
1548     NullCheck* nc = x->explicit_null_check();
1549     if (nc == nullptr) {
1550       info = state_for(x);
1551     } else {
1552       info = state_for(nc);
1553     }
1554   }
1555 
1556   LIRItem object(x->obj(), this);
1557   LIRItem value(x->value(),  this);
1558 
1559   object.load_item();
1560 
1561   if (is_volatile || needs_patching) {
1562     // load item if field is volatile (fewer special cases for volatiles)
1563     // load item if field not initialized
1564     // load item if field not constant
1565     // because of code patching we cannot inline constants
1566     if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1567       value.load_byte_item();
1568     } else  {
1569       value.load_item();
1570     }
1571   } else {
1572     value.load_for_store(field_type);
1573   }
1574 
1575   set_no_result(x);
1576 
1577 #ifndef PRODUCT
1578   if (PrintNotLoaded && needs_patching) {
1579     tty->print_cr("   ###class not loaded at store_%s bci %d",
1580                   x->is_static() ?  "static" : "field", x->printable_bci());
1581   }
1582 #endif
1583 
1584   if (x->needs_null_check() &&
1585       (needs_patching ||
1586        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1587     // Emit an explicit null check because the offset is too large.
1588     // If the class is not loaded and the object is null, we need to deoptimize to throw a
1589     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1590     __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1591   }
1592 
1593   DecoratorSet decorators = IN_HEAP;
1594   if (is_volatile) {
1595     decorators |= MO_SEQ_CST;
1596   }
1597   if (needs_patching) {
1598     decorators |= C1_NEEDS_PATCHING;
1599   }
1600 
1601   access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
1602                   value.result(), info != nullptr ? new CodeEmitInfo(info) : nullptr, info);
1603 }
1604 
1605 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1606   assert(x->is_pinned(),"");
1607   bool needs_range_check = x->compute_needs_range_check();
1608   bool use_length = x->length() != nullptr;
1609   bool obj_store = is_reference_type(x->elt_type());
1610   bool needs_store_check = obj_store && (x->value()->as_Constant() == nullptr ||
1611                                          !get_jobject_constant(x->value())->is_null_object() ||
1612                                          x->should_profile());
1613 
1614   LIRItem array(x->array(), this);
1615   LIRItem index(x->index(), this);
1616   LIRItem value(x->value(), this);
1617   LIRItem length(this);
1618 
1619   array.load_item();
1620   index.load_nonconstant();
1621 
1622   if (use_length && needs_range_check) {
1623     length.set_instruction(x->length());
1624     length.load_item();
1625 
1626   }
1627   if (needs_store_check || x->check_boolean()) {
1628     value.load_item();
1629   } else {
1630     value.load_for_store(x->elt_type());
1631   }
1632 
1633   set_no_result(x);
1634 
1635   // the CodeEmitInfo must be duplicated for each different
1636   // LIR-instruction because spilling can occur anywhere between two
1637   // instructions and so the debug information must be different
1638   CodeEmitInfo* range_check_info = state_for(x);
1639   CodeEmitInfo* null_check_info = nullptr;
1640   if (x->needs_null_check()) {
1641     null_check_info = new CodeEmitInfo(range_check_info);
1642   }
1643 
1644   if (needs_range_check) {
1645     if (use_length) {
1646       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1647       __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
1648     } else {
1649       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1650       // range_check also does the null check
1651       null_check_info = nullptr;
1652     }
1653   }
1654 
1655   if (GenerateArrayStoreCheck && needs_store_check) {
1656     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1657     array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1658   }
1659 
1660   DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1661   if (x->check_boolean()) {
1662     decorators |= C1_MASK_BOOLEAN;
1663   }
1664 
1665   access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
1666                   nullptr, null_check_info);
1667 }
1668 
1669 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
1670                                   LIRItem& base, LIR_Opr offset, LIR_Opr result,
1671                                   CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
1672   decorators |= ACCESS_READ;
1673   LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
1674   if (access.is_raw()) {
1675     _barrier_set->BarrierSetC1::load_at(access, result);
1676   } else {
1677     _barrier_set->load_at(access, result);
1678   }
1679 }
1680 
1681 void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
1682                                LIR_Opr addr, LIR_Opr result) {
1683   decorators |= ACCESS_READ;
1684   LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
1685   access.set_resolved_addr(addr);
1686   if (access.is_raw()) {
1687     _barrier_set->BarrierSetC1::load(access, result);
1688   } else {
1689     _barrier_set->load(access, result);
1690   }
1691 }
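
The BarrierSetC1::-qualified calls above bind to the base-class implementation non-virtually, so raw accesses bypass any GC-specific override. A minimal standalone C++ sketch of the idiom (class names are illustrative, not the real hierarchy):

    #include <cstdio>

    struct BaseBarrier { virtual void load_at() { std::puts("raw load, no barrier"); } };
    struct GCBarrier : BaseBarrier { void load_at() override { std::puts("load with GC barrier"); } };

    int main() {
      BaseBarrier* bs = new GCBarrier();
      bs->load_at();               // virtual dispatch: GC-specific version
      bs->BaseBarrier::load_at();  // qualified call: base version, barrier elided
      return 0;
    }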
1692 
1693 void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type,
1694                                    LIRItem& base, LIR_Opr offset, LIR_Opr value,
1695                                    CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
1696   decorators |= ACCESS_WRITE;
1697   LIRAccess access(this, decorators, base, offset, type, patch_info, store_emit_info);
1698   if (access.is_raw()) {
1699     _barrier_set->BarrierSetC1::store_at(access, value);
1700   } else {
1701     _barrier_set->store_at(access, value);
1702   }
1703 }
1704 
1705 LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
1706                                                LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
1707   decorators |= ACCESS_READ;
1708   decorators |= ACCESS_WRITE;
1709   // Atomic operations are SEQ_CST by default
1710   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
1711   LIRAccess access(this, decorators, base, offset, type);
1712   if (access.is_raw()) {
1713     return _barrier_set->BarrierSetC1::atomic_cmpxchg_at(access, cmp_value, new_value);
1714   } else {
1715     return _barrier_set->atomic_cmpxchg_at(access, cmp_value, new_value);
1716   }
1717 }

1728   } else {
1729     return _barrier_set->atomic_xchg_at(access, value);
1730   }
1731 }
1732 
1733 LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type,
1734                                            LIRItem& base, LIRItem& offset, LIRItem& value) {
1735   decorators |= ACCESS_READ;
1736   decorators |= ACCESS_WRITE;
1737   // Atomic operations are SEQ_CST by default
1738   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
1739   LIRAccess access(this, decorators, base, offset, type);
1740   if (access.is_raw()) {
1741     return _barrier_set->BarrierSetC1::atomic_add_at(access, value);
1742   } else {
1743     return _barrier_set->atomic_add_at(access, value);
1744   }
1745 }
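
The memory-order defaulting in the atomic accessors above relies on the MO decorator bits being zero when the caller specified no ordering. A minimal sketch of the idiom with made-up bit values (the real DecoratorSet constants are defined elsewhere in HotSpot):

    const unsigned MY_MO_MASK    = 0xF0;  // hypothetical mask of all MO_* bits
    const unsigned MY_MO_SEQ_CST = 0x10;  // hypothetical MO_SEQ_CST bit
    unsigned decorators = 0x01;           // caller set no memory-order bits
    decorators |= ((decorators & MY_MO_MASK) == 0) ? MY_MO_SEQ_CST : 0u;
    // decorators now includes MY_MO_SEQ_CST; an explicit MO_* choice is left alone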
1746 
1747 void LIRGenerator::do_LoadField(LoadField* x) {
1748   bool needs_patching = x->needs_patching();
1749   bool is_volatile = x->field()->is_volatile();
1750   BasicType field_type = x->field_type();
1751 
1752   CodeEmitInfo* info = nullptr;
1753   if (needs_patching) {
1754     assert(x->explicit_null_check() == nullptr, "can't fold null check into patching field access");
1755     info = state_for(x, x->state_before());
1756   } else if (x->needs_null_check()) {
1757     NullCheck* nc = x->explicit_null_check();
1758     if (nc == nullptr) {
1759       info = state_for(x);
1760     } else {
1761       info = state_for(nc);
1762     }
1763   }
1764 
1765   LIRItem object(x->obj(), this);
1766 
1767   object.load_item();
1768 
1769 #ifndef PRODUCT

1780        stress_deopt)) {
1781     LIR_Opr obj = object.result();
1782     if (stress_deopt) {
1783       obj = new_register(T_OBJECT);
1784       __ move(LIR_OprFact::oopConst(nullptr), obj);
1785     }
1786     // Emit an explicit null check because the offset is too large.
1787     // If the class is not loaded and the object is null, we need to deoptimize to throw a
1788     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1789     __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1790   }
1791 
1792   DecoratorSet decorators = IN_HEAP;
1793   if (is_volatile) {
1794     decorators |= MO_SEQ_CST;
1795   }
1796   if (needs_patching) {
1797     decorators |= C1_NEEDS_PATCHING;
1798   }
1799 
1800   LIR_Opr result = rlock_result(x, field_type);
1801   access_load_at(decorators, field_type,
1802                  object, LIR_OprFact::intConst(x->offset()), result,
1803                  info ? new CodeEmitInfo(info) : nullptr, info);
1804 }
1805 
1806 // int/long jdk.internal.util.Preconditions.checkIndex
1807 void LIRGenerator::do_PreconditionsCheckIndex(Intrinsic* x, BasicType type) {
1808   assert(x->number_of_arguments() == 3, "wrong type");
1809   LIRItem index(x->argument_at(0), this);
1810   LIRItem length(x->argument_at(1), this);
1811   LIRItem oobef(x->argument_at(2), this);
1812 
1813   index.load_item();
1814   length.load_item();
1815   oobef.load_item();
1816 
1817   LIR_Opr result = rlock_result(x);
1818   // x->state() is created from copy_state_for_exception; it does not contain arguments,
1819   // so we should prepare them before entering the interpreter on deoptimization.

1928       __ move(LIR_OprFact::oopConst(nullptr), obj);
1929       __ null_check(obj, new CodeEmitInfo(null_check_info));
1930     }
1931   }
1932 
1933   if (needs_range_check) {
1934     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1935       __ branch(lir_cond_always, new RangeCheckStub(range_check_info, index.result(), array.result()));
1936     } else if (use_length) {
1937       // TODO: use a (modified) version of array_range_check that does not require a
1938       //       constant length to be loaded to a register
1939       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1940       __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
1941     } else {
1942       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1943       // The range check performs the null check, so clear it out for the load
1944       null_check_info = nullptr;
1945     }
1946   }
1947 
1948   DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1949 
1950   LIR_Opr result = rlock_result(x, x->elt_type());
1951   access_load_at(decorators, x->elt_type(),
1952                  array, index.result(), result,
1953                  nullptr, null_check_info);
1954 }
1955 
1956 
1957 void LIRGenerator::do_NullCheck(NullCheck* x) {
1958   if (x->can_trap()) {
1959     LIRItem value(x->obj(), this);
1960     value.load_item();
1961     CodeEmitInfo* info = state_for(x);
1962     __ null_check(value.result(), info);
1963   }
1964 }
1965 
1966 
1967 void LIRGenerator::do_TypeCast(TypeCast* x) {
1968   LIRItem value(x->obj(), this);
1969   value.load_item();
1970   // the result is the same as from the node we are casting
1971   set_result(x, value.result());
1972 }
1973 

2416   Compilation* comp = Compilation::current();
2417   if (do_update) {
2418     // try to find exact type, using CHA if possible, so that loading
2419     // the klass from the object can be avoided
2420     ciType* type = obj->exact_type();
2421     if (type == nullptr) {
2422       type = obj->declared_type();
2423       type = comp->cha_exact_type(type);
2424     }
2425     assert(type == nullptr || type->is_klass(), "type should be class");
2426     exact_klass = (type != nullptr && type->is_loaded()) ? (ciKlass*)type : nullptr;
2427 
2428     do_update = exact_klass == nullptr || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2429   }
2430 
2431   if (!do_null && !do_update) {
2432     return result;
2433   }
2434 
2435   ciKlass* exact_signature_k = nullptr;
2436   if (do_update) {
2437     // Is the type from the signature exact (the only one possible)?
2438     exact_signature_k = signature_at_call_k->exact_klass();
2439     if (exact_signature_k == nullptr) {
2440       exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2441     } else {
2442       result = exact_signature_k;
2443       // Known statically. No need to emit any code: prevent
2444       // LIR_Assembler::emit_profile_type() from emitting useless code
2445       profiled_k = ciTypeEntries::with_status(result, profiled_k);
2446     }
2447     // exact_klass and exact_signature_k can both be non-null but
2448     // different if exact_klass is loaded after the ciObject for
2449     // exact_signature_k is created.
2450     if (exact_klass == nullptr && exact_signature_k != nullptr && exact_klass != exact_signature_k) {
2451       // sometimes the type of the signature is better than the best type
2452       // the compiler has
2453       exact_klass = exact_signature_k;
2454     }
2455     if (callee_signature_k != nullptr &&
2456         callee_signature_k != signature_at_call_k) {
2457       ciKlass* improved_klass = callee_signature_k->exact_klass();
2458       if (improved_klass == nullptr) {
2459         improved_klass = comp->cha_exact_type(callee_signature_k);
2460       }
2461       if (exact_klass == nullptr && improved_klass != nullptr && exact_klass != improved_klass) {
2462         exact_klass = exact_signature_k;
2463       }
2464     }
2465     do_update = exact_klass == nullptr || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2466   }
2467 
2468   if (!do_null && !do_update) {
2469     return result;
2470   }
2471 
2472   if (mdp == LIR_OprFact::illegalOpr) {
2473     mdp = new_register(T_METADATA);
2474     __ metadata2reg(md->constant_encoding(), mdp);
2475     if (md_base_offset != 0) {
2476       LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
2477       mdp = new_pointer_register();
2478       __ leal(LIR_OprFact::address(base_type_address), mdp);
2479     }
2480   }
2481   LIRItem value(obj, this);
2482   value.load_item();
2483   __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
2484                   value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != nullptr);
2485   return result;
2486 }
2487 

2501         assert(!src->is_illegal(), "check");
2502         BasicType t = src->type();
2503         if (is_reference_type(t)) {
2504           intptr_t profiled_k = parameters->type(j);
2505           Local* local = x->state()->local_at(java_index)->as_Local();
2506           ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
2507                                         in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
2508                                         profiled_k, local, mdp, false, local->declared_type()->as_klass(), nullptr);
2509           // If the profile is known statically set it once for all and do not emit any code
2510           if (exact != nullptr) {
2511             md->set_parameter_type(j, exact);
2512           }
2513           j++;
2514         }
2515         java_index += type2size[t];
2516       }
2517     }
2518   }
2519 }
2520 
2521 void LIRGenerator::do_Base(Base* x) {
2522   __ std_entry(LIR_OprFact::illegalOpr);
2523   // Emit moves from physical registers / stack slots to virtual registers
2524   CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2525   IRScope* irScope = compilation()->hir()->top_scope();
2526   int java_index = 0;
2527   for (int i = 0; i < args->length(); i++) {
2528     LIR_Opr src = args->at(i);
2529     assert(!src->is_illegal(), "check");
2530     BasicType t = src->type();
2531 
2532     // Types which are smaller than int are passed as int, so
2533     // correct the type that is passed.
2534     switch (t) {
2535     case T_BYTE:
2536     case T_BOOLEAN:
2537     case T_SHORT:
2538     case T_CHAR:
2539       t = T_INT;
2540       break;

2542       break;
2543     }
2544 
2545     LIR_Opr dest = new_register(t);
2546     __ move(src, dest);
2547 
2548     // Assign new location to Local instruction for this local
2549     Local* local = x->state()->local_at(java_index)->as_Local();
2550     assert(local != nullptr, "Locals for incoming arguments must have been created");
2551 #ifndef __SOFTFP__
2552     // The java calling convention passes double as long and float as int.
2553     assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
2554 #endif // __SOFTFP__
2555     local->set_operand(dest);
2556 #ifdef ASSERT
2557     _instruction_for_operand.at_put_grow(dest->vreg_number(), local, nullptr);
2558 #endif
2559     java_index += type2size[t];
2560   }
2561 
2562   if (compilation()->env()->dtrace_method_probes()) {
2563     BasicTypeList signature;
2564     signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
2565     signature.append(T_METADATA); // Method*
2566     LIR_OprList* args = new LIR_OprList();
2567     args->append(getThreadPointer());
2568     LIR_Opr meth = new_register(T_METADATA);
2569     __ metadata2reg(method()->constant_encoding(), meth);
2570     args->append(meth);
2571     call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, nullptr);
2572   }
2573 
2574   if (method()->is_synchronized()) {
2575     LIR_Opr obj;
2576     if (method()->is_static()) {
2577       obj = new_register(T_OBJECT);
2578       __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2579     } else {
2580       Local* receiver = x->state()->local_at(0)->as_Local();
2581       assert(receiver != nullptr, "must already exist");

2583     }
2584     assert(obj->is_valid(), "must be valid");
2585 
2586     if (method()->is_synchronized()) {
2587       LIR_Opr lock = syncLockOpr();
2588       __ load_stack_address_monitor(0, lock);
2589 
2590       CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, x->check_flag(Instruction::DeoptimizeOnException));
2591       CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2592 
2593       // receiver is guaranteed non-null so don't need CodeEmitInfo
2594       __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, nullptr);
2595     }
2596   }
2597   // increment invocation counters if needed
2598   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2599     profile_parameters(x);
2600     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, false);
2601     increment_invocation_counter(info);
2602   }
2603 
2604   // all blocks with a successor must end with an unconditional jump
2605   // to the successor even if they are consecutive
2606   __ jump(x->default_sux());
2607 }
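
The switch in do_Base widens sub-int argument types because the Java calling convention passes byte, boolean, short, and char as int; schematically (an illustrative sketch):

    // static int f(byte b) { return b; }
    // b arrives sign-extended in a full int slot, so its T_BYTE Local is
    // rebound as T_INT before the move into a fresh virtual register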
2608 
2609 
2610 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2611   // construct our frame and model the production of the incoming pointer
2612   // to the OSR buffer.
2613   __ osr_entry(LIR_Assembler::osrBufferPointer());
2614   LIR_Opr result = rlock_result(x);
2615   __ move(LIR_Assembler::osrBufferPointer(), result);
2616 }
2617 
2618 
2619 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2620   assert(args->length() == arg_list->length(),
2621          "args=%d, arg_list=%d", args->length(), arg_list->length());
2622   for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
2623     LIRItem* param = args->at(i);
2624     LIR_Opr loc = arg_list->at(i);
2625     if (loc->is_register()) {
2626       param->load_item_force(loc);
2627     } else {
2628       LIR_Address* addr = loc->as_address_ptr();
2629       param->load_for_store(addr->type());
2630       if (addr->type() == T_OBJECT) {
2631         __ move_wide(param->result(), addr);
2632       } else
2633         __ move(param->result(), addr);
2634     }
2635   }
2636 
2637   if (x->has_receiver()) {
2638     LIRItem* receiver = args->at(0);
2639     LIR_Opr loc = arg_list->at(0);
2640     if (loc->is_register()) {
2641       receiver->load_item_force(loc);
2642     } else {
2643       assert(loc->is_address(), "just checking");
2644       receiver->load_for_store(T_OBJECT);
2645       __ move_wide(receiver->result(), loc->as_address_ptr());
2646     }
2647   }
2648 }
2649 
2650 
2651 // Visits all arguments, returns appropriate items without loading them
2652 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2653   LIRItemList* argument_items = new LIRItemList();
2654   if (x->has_receiver()) {

2761   __ move(tmp, reg);
2762 }
2763 
2764 
2765 
2766 // Code for  :  x->x() {x->cond()} x->y() ? x->tval() : x->fval()
2767 void LIRGenerator::do_IfOp(IfOp* x) {
2768 #ifdef ASSERT
2769   {
2770     ValueTag xtag = x->x()->type()->tag();
2771     ValueTag ttag = x->tval()->type()->tag();
2772     assert(xtag == intTag || xtag == objectTag, "cannot handle others");
2773     assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
2774     assert(ttag == x->fval()->type()->tag(), "cannot handle others");
2775   }
2776 #endif
2777 
2778   LIRItem left(x->x(), this);
2779   LIRItem right(x->y(), this);
2780   left.load_item();
2781   if (can_inline_as_constant(right.value())) {
2782     right.dont_load_item();
2783   } else {
2784     right.load_item();
2785   }
2786 
2787   LIRItem t_val(x->tval(), this);
2788   LIRItem f_val(x->fval(), this);
2789   t_val.dont_load_item();
2790   f_val.dont_load_item();
2791   LIR_Opr reg = rlock_result(x);
2792 
2793   __ cmp(lir_cond(x->cond()), left.result(), right.result());
2794   __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
2795 }
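
For a ternary such as r = (a > b) ? t : f, the sequence above therefore lowers to a compare plus conditional move instead of control flow; schematically (not exact LIR output):

    // cmp   greater, a, b
    // cmove greater, t, f -> r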
2796 
2797 void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {
2798   assert(x->number_of_arguments() == 0, "wrong type");
2799   // Enforce computation of _reserved_argument_area_size which is required on some platforms.
2800   BasicTypeList signature;
2801   CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2802   LIR_Opr reg = result_register_for(x->type());
2803   __ call_runtime_leaf(routine, getThreadTemp(),
2804                        reg, new LIR_OprList());
2805   LIR_Opr result = rlock_result(x);
2806   __ move(reg, result);
2807 }
2808 
2809 
2810 
2811 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
2812   switch (x->id()) {
2813   case vmIntrinsics::_intBitsToFloat      :
2814   case vmIntrinsics::_doubleToRawLongBits :

3049   if (x->recv() != nullptr || x->nb_profiled_args() > 0) {
3050     profile_parameters_at_call(x);
3051   }
3052 
3053   if (x->recv() != nullptr) {
3054     LIRItem value(x->recv(), this);
3055     value.load_item();
3056     recv = new_register(T_OBJECT);
3057     __ move(value.result(), recv);
3058   }
3059   __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3060 }
3061 
3062 void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
3063   int bci = x->bci_of_invoke();
3064   ciMethodData* md = x->method()->method_data_or_null();
3065   assert(md != nullptr, "Sanity");
3066   ciProfileData* data = md->bci_to_data(bci);
3067   if (data != nullptr) {
3068     assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
3069     ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
3070     LIR_Opr mdp = LIR_OprFact::illegalOpr;
3071 
3072     bool ignored_will_link;
3073     ciSignature* signature_at_call = nullptr;
3074     x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3075 
3076     // The offset within the MDO of the entry to update may be too large
3077     // to be used in load/store instructions on some platforms. So have
3078     // profile_type() compute the address of the profile in a register.
3079     ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
3080         ret->type(), x->ret(), mdp,
3081         !x->needs_null_check(),
3082         signature_at_call->return_type()->as_klass(),
3083         x->callee()->signature()->return_type()->as_klass());
3084     if (exact != nullptr) {
3085       md->set_return_type(bci, exact);
3086     }
3087   }
3088 }
3089 
3090 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
3091   // We can safely ignore accessors here, since C2 will inline them anyway;
3092   // accessors are also always mature.
3093   if (!x->inlinee()->is_accessor()) {
3094     CodeEmitInfo* info = state_for(x, x->state(), true);
3095     // Notify the runtime only very infrequently, to take care of counter overflows
3096     int freq_log = Tier23InlineeNotifyFreqLog;
3097     double scale;
3098     if (_method->has_option_value(CompileCommandEnum::CompileThresholdScaling, scale)) {
3099       freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
3100     }
3101     increment_event_counter_impl(info, x->inlinee(), LIR_OprFact::intConst(InvocationCounter::count_increment), right_n_bits(freq_log), InvocationEntryBci, false, true);
3102   }
3103 }
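
right_n_bits(freq_log) builds a mask of the low freq_log bits, so the notification fires roughly once every 2^freq_log inlinee events. A worked example, assuming freq_log == 10:

    // right_n_bits(10) == (1 << 10) - 1 == 0x3FF
    // the runtime is notified when (counter & 0x3FF) == 0,
    // i.e. about once every 1024 increments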
3104 
3105 void LIRGenerator::increment_backedge_counter_conditionally(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info, int left_bci, int right_bci, int bci) {
3106   if (compilation()->is_profiling()) {
3107 #if defined(X86) && !defined(_LP64)
3108     // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
3109     LIR_Opr left_copy = new_register(left->type());

src/hotspot/share/c1/c1_LIRGenerator.cpp (new version)

   1 /*
   2  * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "c1/c1_Compilation.hpp"
  26 #include "c1/c1_Defs.hpp"
  27 #include "c1/c1_FrameMap.hpp"
  28 #include "c1/c1_Instruction.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_LIRGenerator.hpp"
  31 #include "c1/c1_ValueStack.hpp"
  32 #include "ci/ciArrayKlass.hpp"
  33 #include "ci/ciFlatArrayKlass.hpp"
  34 #include "ci/ciInlineKlass.hpp"
  35 #include "ci/ciInstance.hpp"
  36 #include "ci/ciObjArray.hpp"
  37 #include "ci/ciObjArrayKlass.hpp"
  38 #include "ci/ciUtilities.hpp"
  39 #include "compiler/compilerDefinitions.inline.hpp"
  40 #include "compiler/compilerOracle.hpp"
  41 #include "gc/shared/barrierSet.hpp"
  42 #include "gc/shared/c1/barrierSetC1.hpp"
  43 #include "oops/klass.inline.hpp"
  44 #include "oops/methodCounters.hpp"
  45 #include "runtime/arguments.hpp"
  46 #include "runtime/sharedRuntime.hpp"
  47 #include "runtime/stubRoutines.hpp"
  48 #include "runtime/vm_version.hpp"
  49 #include "utilities/bitMap.inline.hpp"
  50 #include "utilities/macros.hpp"
  51 #include "utilities/powerOfTwo.hpp"
  52 
  53 #ifdef ASSERT
  54 #define __ gen()->lir(__FILE__, __LINE__)->
  55 #else
  56 #define __ gen()->lir()->
  57 #endif
  58 
  59 #ifndef PATCHED_ADDR
  60 #define PATCHED_ADDR  (max_jint)
  61 #endif
  62 
  63 void PhiResolverState::reset() {
  64   _virtual_operands.clear();
  65   _other_operands.clear();

 202 }
 203 
 204 
 205 //--------------------------------------------------------------
 206 // LIRItem
 207 
 208 void LIRItem::set_result(LIR_Opr opr) {
 209   assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
 210   value()->set_operand(opr);
 211 
 212 #ifdef ASSERT
 213   if (opr->is_virtual()) {
 214     _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), nullptr);
 215   }
 216 #endif
 217 
 218   _result = opr;
 219 }
 220 
 221 void LIRItem::load_item() {
 222   assert(!_gen->in_conditional_code(), "LIRItem cannot be loaded in conditional code");
 223 
 224   if (result()->is_illegal()) {
 225     // update the items result
 226     _result = value()->operand();
 227   }
 228   if (!result()->is_register()) {
 229     LIR_Opr reg = _gen->new_register(value()->type());
 230     __ move(result(), reg);
 231     if (result()->is_constant()) {
 232       _result = reg;
 233     } else {
 234       set_result(reg);
 235     }
 236   }
 237 }
 238 
 239 
 240 void LIRItem::load_for_store(BasicType type) {
 241   if (_gen->can_store_as_constant(value(), type)) {
 242     _result = value()->operand();
 243     if (!_result->is_constant()) {

 611     assert(right_op != result_op, "malformed");
 612     __ move(left_op, result_op);
 613     left_op = result_op;
 614   }
 615 
 616   switch(code) {
 617     case Bytecodes::_iand:
 618     case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;
 619 
 620     case Bytecodes::_ior:
 621     case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;
 622 
 623     case Bytecodes::_ixor:
 624     case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;
 625 
 626     default: ShouldNotReachHere();
 627   }
 628 }
 629 
 630 
 631 void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no,
 632                                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info, CodeStub* throw_ie_stub) {
 633   // for slow path, use debug info for state after successful locking
 634   CodeStub* slow_path = new MonitorEnterStub(object, lock, info, throw_ie_stub, scratch);
 635   __ load_stack_address_monitor(monitor_no, lock);
 636   // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
 637   __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception, throw_ie_stub);
 638 }
 639 
 640 
 641 void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
 642   // setup registers
 643   LIR_Opr hdr = lock;
 644   lock = new_hdr;
 645   CodeStub* slow_path = new MonitorExitStub(lock, monitor_no);
 646   __ load_stack_address_monitor(monitor_no, lock);
 647   __ unlock_object(hdr, object, lock, scratch, slow_path);
 648 }
 649 
 650 #ifndef PRODUCT
 651 void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
 652   if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
 653     tty->print_cr("   ###class not loaded at new bci %d", new_instance->printable_bci());
 654   } else if (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_jvmci() && new_instance->is_unresolved())) {
 655     tty->print_cr("   ###class not resolved at new bci %d", new_instance->printable_bci());
 656   }
 657 }
 658 #endif
 659 
 660 void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, bool allow_inline, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
 661   if (allow_inline) {
 662     assert(!is_unresolved && klass->is_loaded(), "inline type klass should be resolved");
 663     __ metadata2reg(klass->constant_encoding(), klass_reg);
 664   } else {
 665     klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
 666   }
 667   // If klass is not loaded we do not know if the klass has finalizers or is an unexpected inline klass
 668   if (UseFastNewInstance && klass->is_loaded() && (allow_inline || !klass->is_inlinetype())
 669       && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
 670 
 671     StubId stub_id = klass->is_initialized() ? StubId::c1_fast_new_instance_id : StubId::c1_fast_new_instance_init_check_id;
 672 
 673     CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);
 674 
 675     assert(klass->is_loaded(), "must be loaded");
 676     // allocate space for instance
 677     assert(klass->size_helper() > 0, "illegal instance size");
 678     const int instance_size = align_object_size(klass->size_helper());
 679     __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
 680                        oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
 681   } else {
 682     CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, StubId::c1_new_instance_id);
 683     __ jump(slow_path);
 684     __ branch_destination(slow_path->continuation());
 685   }
 686 }
 687 
 688 
 689 static bool is_constant_zero(Instruction* inst) {
 690   IntConstant* c = inst->type()->as_IntConstant();
 691   if (c) {
 692     return (c->value() == 0);
 693   }
 694   return false;
 695 }
 696 
 697 
 698 static bool positive_constant(Instruction* inst) {
 699   IntConstant* c = inst->type()->as_IntConstant();
 700   if (c) {
 701     return (c->value() >= 0);
 702   }
 703   return false;

 755     } else if (dst_exact_type != nullptr && dst_exact_type->is_obj_array_klass()) {
 756       ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
 757       ciArrayKlass* src_type = nullptr;
 758       if (src_exact_type != nullptr && src_exact_type->is_obj_array_klass()) {
 759         src_type = (ciArrayKlass*) src_exact_type;
 760       } else if (src_declared_type != nullptr && src_declared_type->is_obj_array_klass()) {
 761         src_type = (ciArrayKlass*) src_declared_type;
 762       }
 763       if (src_type != nullptr) {
 764         if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
 765           is_exact = true;
 766           expected_type = dst_type;
 767         }
 768       }
 769     }
 770     // at least pass along a good guess
 771     if (expected_type == nullptr) expected_type = dst_exact_type;
 772     if (expected_type == nullptr) expected_type = src_declared_type;
 773     if (expected_type == nullptr) expected_type = dst_declared_type;
 774 
 775     if (expected_type != nullptr && expected_type->is_obj_array_klass()) {
 776       // For a direct pointer comparison, we need the refined array klass pointer
 777       expected_type = ciObjArrayKlass::make(expected_type->as_array_klass()->element_klass());
 778     }
 779 
 780     src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
 781     dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
 782   }
 783 
 784   // if a probable array type has been identified, figure out if any
 785   // of the required checks for a fast case can be elided.
 786   int flags = LIR_OpArrayCopy::all_flags;
 787 
 788   // TODO 8251971 Compare ArrayKlass::properties() of source and destination
 789   // array here instead, see also LIR_Assembler::arraycopy_inlinetype_check
 790   if (!src->is_loaded_flat_array() && !dst->is_loaded_flat_array()) {
 791     flags &= ~LIR_OpArrayCopy::always_slow_path;
 792   }
 793   if (!src->maybe_flat_array()) {
 794     flags &= ~LIR_OpArrayCopy::src_inlinetype_check;
 795   }
 796   if (!dst->maybe_flat_array() && !dst->maybe_null_free_array()) {
 797     flags &= ~LIR_OpArrayCopy::dst_inlinetype_check;
 798   }
 799 
 800   if (!src_objarray)
 801     flags &= ~LIR_OpArrayCopy::src_objarray;
 802   if (!dst_objarray)
 803     flags &= ~LIR_OpArrayCopy::dst_objarray;
 804 
 805   if (!x->arg_needs_null_check(0))
 806     flags &= ~LIR_OpArrayCopy::src_null_check;
 807   if (!x->arg_needs_null_check(2))
 808     flags &= ~LIR_OpArrayCopy::dst_null_check;
 809 
 810 
 811   if (expected_type != nullptr) {
 812     Value length_limit = nullptr;
 813 
 814     IfOp* ifop = length->as_IfOp();
 815     if (ifop != nullptr) {
 816       // look for expressions like min(v, a.length) which end up as
 817       //   x > y ? y : x  or  x >= y ? y : x
 818       if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
 819           ifop->x() == ifop->fval() &&

1472   }
1473   return _vreg_flags.at(vreg_num, f);
1474 }
1475 
1476 
1477 // Block local constant handling.  This code is useful for keeping
1478 // unpinned constants and constants which aren't exposed in the IR in
1479 // registers.  Unpinned Constant instructions have their operands
1480 // cleared when the block is finished so that other blocks can't end
1481 // up referring to their registers.
1482 
1483 LIR_Opr LIRGenerator::load_constant(Constant* x) {
1484   assert(!x->is_pinned(), "only for unpinned constants");
1485   _unpinned_constants.append(x);
1486   return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
1487 }
1488 
1489 
1490 LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
1491   BasicType t = c->type();
1492   for (int i = 0; i < _constants.length() && !in_conditional_code(); i++) {
1493     LIR_Const* other = _constants.at(i);
1494     if (t == other->type()) {
1495       switch (t) {
1496       case T_INT:
1497       case T_FLOAT:
1498         if (c->as_jint_bits() != other->as_jint_bits()) continue;
1499         break;
1500       case T_LONG:
1501       case T_DOUBLE:
1502         if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
1503         if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
1504         break;
1505       case T_OBJECT:
1506         if (c->as_jobject() != other->as_jobject()) continue;
1507         break;
1508       default:
1509         break;
1510       }
1511       return _reg_for_constants.at(i);
1512     }
1513   }
1514 
1515   LIR_Opr result = new_register(t);
1516   __ move((LIR_Opr)c, result);
1517   if (!in_conditional_code()) {
1518     _constants.append(c);
1519     _reg_for_constants.append(result);
1520   }
1521   return result;
1522 }
1523 
1524 void LIRGenerator::set_in_conditional_code(bool v) {
1525   assert(v != _in_conditional_code, "must change state");
1526   _in_conditional_code = v;
1527 }
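
The guard matters because a constant materialized in conditional code is defined on only one path; caching it could let a later, unconditional use read an undefined register. A sketch of the hazard the in_conditional_code() checks avoid (pseudocode):

    // if (p) { v7 = move 42 }   // emitted while in conditional code
    // ...
    // use(v7)                   // a cached reuse here would be undefined when !p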
1528 
1529 
1530 //------------------------field access--------------------------------------
1531 
1532 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
1533   assert(x->number_of_arguments() == 4, "wrong type");
1534   LIRItem obj   (x->argument_at(0), this);  // object
1535   LIRItem offset(x->argument_at(1), this);  // offset of field
1536   LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
1537   LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
1538   assert(obj.type()->tag() == objectTag, "invalid type");
1539   assert(cmp.type()->tag() == type->tag(), "invalid type");
1540   assert(val.type()->tag() == type->tag(), "invalid type");
1541 
1542   LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
1543                                             obj, offset, cmp, val);
1544   set_result(x, result);
1545 }
1546 
1547 // Returns an int/long value with the null marker bit set.
1548 static LIR_Opr null_marker_mask(BasicType bt, int nm_offset) {
1549   assert(nm_offset >= 0, "field does not have null marker");
1550   jlong null_marker = 1ULL << (nm_offset << LogBitsPerByte);
1551   return (bt == T_LONG) ? LIR_OprFact::longConst(null_marker) : LIR_OprFact::intConst(null_marker);
1552 }
1553 
1554 static LIR_Opr null_marker_mask(BasicType bt, ciField* field) {
1555   assert(field->null_marker_offset() != -1, "field does not have null marker");
1556   return null_marker_mask(bt, field->null_marker_offset() - field->offset_in_bytes());
1557 }
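
The inner shift converts the null marker's byte offset within the flat field into a bit position (LogBitsPerByte == 3). A worked example, assuming the marker lies two bytes past the field start:

    // nm_offset == 2
    // nm_offset << LogBitsPerByte == 16
    // null_marker == 1ULL << 16 == 0x10000  (bit 16 set)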
1558 
1559 // Comment copied from templateTable_i486.cpp
1560 // ----------------------------------------------------------------------------
1561 // Volatile variables demand their effects be made known to all CPU's in
1562 // order.  Store buffers on most chips allow reads & writes to reorder; the
1563 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1564 // memory barrier (i.e., it's not sufficient that the interpreter does not
1565 // reorder volatile references; the hardware must not reorder them either).
1566 //
1567 // According to the new Java Memory Model (JMM):
1568 // (1) All volatiles are serialized with respect to each other.
1569 // ALSO reads & writes act as acquire & release, so:
1570 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1571 // the read float up to before the read.  It's OK for non-volatile memory refs
1572 // that happen before the volatile read to float down below it.
1573 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
1574 // that happen BEFORE the write float down to after the write.  It's OK for
1575 // non-volatile memory refs that happen after the volatile write to float up
1576 // before it.
1577 //
1578 // We only put in barriers around volatile refs (they are expensive), not
1579 // _between_ memory refs (that would require us to track the flavor of the
1580 // previous memory refs).  Requirements (2) and (3) require some barriers
1581 // before volatile stores and after volatile loads.  These nearly cover
1582 // requirement (1) but miss the volatile-store-volatile-load case.  This final
1583 // case is placed after volatile-stores although it could just as well go
1584 // before volatile-loads.
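// Roughly, in LIR terms (a sketch only; the exact barriers are chosen by the
// barrier set and the platform):
//   membar_release(); store(volatile_field); membar();   // covers (3) and (1)
//   load(volatile_field); membar_acquire();              // covers (2)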
1585 
1586 
1587 void LIRGenerator::do_StoreField(StoreField* x) {
1588   ciField* field = x->field();
1589   bool needs_patching = x->needs_patching();
1590   bool is_volatile = field->is_volatile();
1591   BasicType field_type = x->field_type();
1592 
1593   CodeEmitInfo* info = nullptr;
1594   if (needs_patching) {
1595     assert(x->explicit_null_check() == nullptr, "can't fold null check into patching field access");
1596     info = state_for(x, x->state_before());
1597   } else if (x->needs_null_check()) {
1598     NullCheck* nc = x->explicit_null_check();
1599     if (nc == nullptr) {
1600       info = state_for(x);
1601     } else {
1602       info = state_for(nc);
1603     }
1604   }
1605 
1606   LIRItem object(x->obj(), this);
1607   LIRItem value(x->value(),  this);
1608 
1609   object.load_item();
1610 
1611   if (field->is_flat()) {
1612     value.load_item();








1613   } else {
1614     if (is_volatile || needs_patching) {
1615       // Load the value into a register if the field is volatile (fewer
1616       // special cases for volatiles), if the field is not yet initialized,
1617       // or if the value is not a constant: because of code patching we
1618       // cannot inline constants.
1619       if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1620         value.load_byte_item();
1621       } else  {
1622         value.load_item();
1623       }
1624     } else {
1625       value.load_for_store(field_type);
1626     }
1627   }
1628 
1629   set_no_result(x);
1630 
1631 #ifndef PRODUCT
1632   if (PrintNotLoaded && needs_patching) {
1633     tty->print_cr("   ###class not loaded at store_%s bci %d",
1634                   x->is_static() ?  "static" : "field", x->printable_bci());
1635   }
1636 #endif
1637 
1638   if (x->needs_null_check() &&
1639       (needs_patching ||
1640        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1641     // Emit an explicit null check because the offset is too large.
1642     // If the class is not loaded and the object is null, we need to deoptimize to throw a
1643     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1644     __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1645   }
1646 
1647   DecoratorSet decorators = IN_HEAP;
1648   if (is_volatile) {
1649     decorators |= MO_SEQ_CST;
1650   }
1651   if (needs_patching) {
1652     decorators |= C1_NEEDS_PATCHING;
1653   }
1654 
1655   if (field->is_flat()) {
1656     ciInlineKlass* vk = field->type()->as_inline_klass();
1657 
1658 #ifdef ASSERT
1659     assert(field->is_atomic(), "No atomic access required %s.%s", field->holder()->name()->as_utf8(), field->name()->as_utf8());
1660     // ZGC does not support compressed oops, so only one oop can be in the payload, which is written by a "normal" oop store.
1661     assert(!vk->contains_oops() || !UseZGC, "ZGC does not support embedded oops in flat fields");
1662 #endif
1663 
1664     // Zero the payload
1665     BasicType bt = vk->atomic_size_to_basic_type(field->is_null_free());
1666     LIR_Opr payload = new_register((bt == T_LONG) ? bt : T_INT);
1667     LIR_Opr zero = (bt == T_LONG) ? LIR_OprFact::longConst(0) : LIR_OprFact::intConst(0);
1668     __ move(zero, payload);
1669 
1670     bool is_constant_null = value.is_constant() && value.value()->is_null_obj();
1671     if (!is_constant_null) {
1672       LabelObj* L_isNull = new LabelObj();
1673       bool needs_null_check = !value.is_constant();
1674       if (needs_null_check) {
1675         __ cmp(lir_cond_equal, value.result(), LIR_OprFact::oopConst(nullptr));
1676         __ branch(lir_cond_equal, L_isNull->label());
1677       }
1678       // Load payload (if not empty) and set null marker (if not null-free)
1679       if (!vk->is_empty()) {
1680         access_load_at(decorators, bt, value, LIR_OprFact::intConst(vk->payload_offset()), payload);
1681       }
1682       if (!field->is_null_free()) {
1683         __ logical_or(payload, null_marker_mask(bt, field), payload);
1684       }
1685       if (needs_null_check) {
1686         __ branch_destination(L_isNull->label());
1687       }
1688     }
1689     access_store_at(decorators, bt, object, LIR_OprFact::intConst(x->offset()), payload,
1690                     // Make sure to emit an implicit null check and pass the information
1691                     // that this is a flat store that might require gc barriers for oop fields.
1692                     info != nullptr ? new CodeEmitInfo(info) : nullptr, info, vk);
1693     return;
1694   }
1695 
1696   access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
1697                   value.result(), info != nullptr ? new CodeEmitInfo(info) : nullptr, info);
1698 }
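// Illustrative payload composition for a nullable flat store, assuming a
// T_LONG atomic payload with the null marker at payload byte 4:
//   store null      -> payload == 0                           (marker clear)
//   store non-null  -> payload == value.payload | (1L << 32)  (marker set)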
1699 
1700 // Wrap an already computed address register as a C1 Instruction so it
1701 // can be passed as a LIRItem into access_load_at() / access_store_at().
1702 class ComputedAddressValue: public Instruction {
1703  public:
1704   ComputedAddressValue(ValueType* type, LIR_Opr addr) : Instruction(type) {
1705     set_operand(addr);
1706   }
1707   virtual void input_values_do(ValueVisitor*) {}
1708   virtual void visit(InstructionVisitor* v)   {}
1709   virtual const char* name() const { return "ComputedAddressValue"; }
1710 };
1711 
1712 LIR_Opr LIRGenerator::get_and_load_element_address(LIRItem& array, LIRItem& index) {
1713 #ifndef _LP64
1714   // We need to be careful with overflows in 32-bit arithmetic
1715   Unimplemented();
1716 #endif
1717   ciType* array_type = array.value()->declared_type();
1718   ciFlatArrayKlass* flat_array_klass = array_type->as_flat_array_klass();
1719   assert(flat_array_klass->is_loaded(), "must be");
1720 
1721   int array_header_size = flat_array_klass->array_header_in_bytes();
1722   int shift = flat_array_klass->log2_element_size();
1723 
1724   LIR_Opr index_op = new_register(T_LONG);
1725   if (index.result()->is_constant()) {
1726     jint const_index = index.result()->as_jint();
1727     __ move(LIR_OprFact::longConst(static_cast<jlong>(const_index) << shift), index_op);
1728   } else {
1729     __ convert(Bytecodes::_i2l, index.result(), index_op);
1730     // Need to shift manually, as LIR_Address can scale only up to 3.
1731     __ shift_left(index_op, shift, index_op);
1732   }
1733 
1734   LIR_Opr elm_op = new_pointer_register();
1735   LIR_Address* elm_address = generate_address(array.result(), index_op, 0, array_header_size, T_ADDRESS);
1736   __ leal(LIR_OprFact::address(elm_address), elm_op);
1737   return elm_op;
1738 }
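// Worked example (illustrative): for a flat array with a 16-byte header and
// 8-byte elements (shift == 3), element 5 resolves to
//   base + 16 + (5 << 3) == base + 56.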
1739 
1740 void LIRGenerator::access_sub_element(LIRItem& array, LIRItem& index, LIR_Opr& result, ciField* field, size_t sub_offset) {
1741   assert(field != nullptr, "Need a subelement type specified");
1742 
1743   // Find the starting address of the source (inside the array)
1744   LIR_Opr elm_op = get_and_load_element_address(array, index);
1745 
1746   BasicType subelt_type = field->type()->basic_type();
1747   ComputedAddressValue* elm_resolved_addr = new ComputedAddressValue(as_ValueType(subelt_type), elm_op);
1748   LIRItem elm_item(elm_resolved_addr, this);
1749 
1750   DecoratorSet decorators = IN_HEAP;
1751   access_load_at(decorators, subelt_type,
1752                  elm_item, LIR_OprFact::longConst(sub_offset), result,
1753                  nullptr, nullptr);
1754 }
1755 
1756 LIR_Opr LIRGenerator::access_flat_array(bool is_load, LIRItem& array, LIRItem& index, LIRItem& obj_item,
1757                                         ciField* field, size_t sub_offset) {
1758   assert(sub_offset == 0 || field != nullptr, "Sanity check");
1759 
1760   // Find the starting address of the source (inside the array)
1761   LIR_Opr elm_op = get_and_load_element_address(array, index);
1762 
1763   ciFlatArrayKlass* array_klass = array.value()->declared_type()->as_flat_array_klass();
1764   ciInlineKlass* elem_klass = nullptr;
1765   if (field != nullptr) {
1766     elem_klass = field->type()->as_inline_klass();
1767   } else {
1768     elem_klass = array_klass->element_klass()->as_inline_klass();
1769   }
1770 
1771   bool null_free = array_klass->is_elem_null_free();
1772   bool atomic = array_klass->is_elem_atomic();
1773   assert(null_free || atomic, "nullable flat arrays must use an atomic layout");
1774   if (atomic) {
1775     assert(field == nullptr && sub_offset == 0, "delayed sub-element access is only supported for non-atomic arrays");
1776     BasicType bt = elem_klass->atomic_size_to_basic_type(null_free);
1777     LIR_Opr payload = new_register((bt == T_LONG) ? bt : T_INT);
1778     ComputedAddressValue* elm_resolved_addr = new ComputedAddressValue(as_ValueType(bt), elm_op);
1779     LIRItem elm_item(elm_resolved_addr, this);
1780     DecoratorSet decorators = IN_HEAP;
1781     if (is_load) {
1782       access_load_at(decorators, bt, elm_item, LIR_OprFact::intConst(0), payload, nullptr, nullptr);
1783       access_store_at(decorators, bt, obj_item, LIR_OprFact::intConst(elem_klass->payload_offset()), payload,
1784                       nullptr, nullptr, elem_klass);
1785       // Null check is performed in the caller
1786     } else {
1787       // Zero the payload
1788       LIR_Opr zero = (bt == T_LONG) ? LIR_OprFact::longConst(0) : LIR_OprFact::intConst(0);
1789       __ move(zero, payload);
1790 
1791       if (null_free) {
1792         if (!elem_klass->is_empty()) {
1793           access_load_at(decorators, bt, obj_item, LIR_OprFact::intConst(elem_klass->payload_offset()), payload);
1794         }
1795       } else {
1796         bool is_constant_null = obj_item.is_constant() && obj_item.value()->is_null_obj();
1797         if (!is_constant_null) {
1798           LabelObj* L_isNull = new LabelObj();
1799           bool needs_null_check = !obj_item.is_constant();
1800           if (needs_null_check) {
1801             __ cmp(lir_cond_equal, obj_item.result(), LIR_OprFact::oopConst(nullptr));
1802             __ branch(lir_cond_equal, L_isNull->label());
1803           }
1804           // Load payload (if not empty) and set null marker.
1805           if (!elem_klass->is_empty()) {
1806             access_load_at(decorators, bt, obj_item, LIR_OprFact::intConst(elem_klass->payload_offset()), payload);
1807           }
1808           __ logical_or(payload, null_marker_mask(bt, elem_klass->null_marker_offset_in_payload()), payload);
1809           if (needs_null_check) {
1810             __ branch_destination(L_isNull->label());
1811           }
1812         }
1813       }
1814       access_store_at(decorators, bt, elm_item, LIR_OprFact::intConst(0), payload, nullptr, nullptr, elem_klass);
1815     }
1816     return payload;
1817   }
1818 
1819   for (int i = 0; i < elem_klass->nof_nonstatic_fields(); i++) {
1820     ciField* inner_field = elem_klass->nonstatic_field_at(i);
1821     assert(!inner_field->is_flat(), "flat fields must have been expanded");
1822     int obj_offset = inner_field->offset_in_bytes();
1823     size_t elm_offset = obj_offset - elem_klass->payload_offset() + sub_offset; // the object header is not stored in the array.
1824     BasicType field_type = inner_field->type()->basic_type();
1825 
1826     // Types which are smaller than int are still passed in an int register.
1827     BasicType reg_type = field_type;
1828     switch (reg_type) {
1829     case T_BYTE:
1830     case T_BOOLEAN:
1831     case T_SHORT:
1832     case T_CHAR:
1833       reg_type = T_INT;
1834       break;
1835     default:
1836       break;
1837     }
1838 
1839     LIR_Opr temp = new_register(reg_type);
1840     ComputedAddressValue* elm_resolved_addr = new ComputedAddressValue(as_ValueType(field_type), elm_op);
1841     LIRItem elm_item(elm_resolved_addr, this);
1842 
1843     DecoratorSet decorators = IN_HEAP;
1844     if (is_load) {
1845       access_load_at(decorators, field_type,
1846                      elm_item, LIR_OprFact::longConst(elm_offset), temp,
1847                      nullptr, nullptr);
1848       access_store_at(decorators, field_type,
1849                       obj_item, LIR_OprFact::intConst(obj_offset), temp,
1850                       nullptr, nullptr);
1851     } else {
1852       access_load_at(decorators, field_type,
1853                      obj_item, LIR_OprFact::intConst(obj_offset), temp,
1854                      nullptr, nullptr);
1855       access_store_at(decorators, field_type,
1856                       elm_item, LIR_OprFact::longConst(elm_offset), temp,
1857                       nullptr, nullptr);
1858     }
1859   }
1860   return LIR_OprFact::illegalOpr;
1861 }
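// Illustrative non-atomic copy, assuming an element klass with fields
// {int x @ offset 12, short y @ offset 16} and payload_offset == 12: a load
// copies each field separately,
//   obj.x = *(elm_addr + 0);    obj.y = *(elm_addr + 4);
// where elm_addr was produced by get_and_load_element_address().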
1862 
1863 void LIRGenerator::check_flat_array(LIR_Opr array, CodeStub* slow_path) {
1864   LIR_Opr tmp = new_register(T_METADATA);
1865   __ check_flat_array(array, tmp, slow_path);
1866 }
1867 
1868 void LIRGenerator::check_null_free_array(LIRItem& array, LIRItem& value, CodeEmitInfo* info) {
1869   LabelObj* L_end = new LabelObj();
1870   LIR_Opr tmp = new_register(T_METADATA);
1871   __ check_null_free_array(array.result(), tmp);
1872   __ branch(lir_cond_equal, L_end->label());
1873   __ null_check(value.result(), info);
1874   __ branch_destination(L_end->label());
1875 }
1876 
1877 bool LIRGenerator::needs_flat_array_store_check(StoreIndexed* x) {
1878   if (x->elt_type() == T_OBJECT && x->array()->maybe_flat_array()) {
1879     ciType* type = x->value()->declared_type();
1880     if (type != nullptr && type->is_klass()) {
1881       ciKlass* klass = type->as_klass();
1882       if (!klass->can_be_inline_klass() || (klass->is_inlinetype() && !klass->as_inline_klass()->maybe_flat_in_array())) {
1883         // This is known to be a non-flat object. If the array is a flat array,
1884         // it will be caught by the code generated by array_store_check().
1885         return false;
1886       }
1887     }
1888     // We're not 100% sure, so let's do the flat_array_store_check.
1889     return true;
1890   }
1891   return false;
1892 }
1893 
1894 bool LIRGenerator::needs_null_free_array_store_check(StoreIndexed* x) {
1895   return x->elt_type() == T_OBJECT && x->array()->maybe_null_free_array();
1896 }
1897 
1898 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1899   assert(x->is_pinned(), "");
1900   assert(x->elt_type() != T_ARRAY, "never used");
1901   bool is_loaded_flat_array = x->array()->is_loaded_flat_array();
1902   bool needs_range_check = x->compute_needs_range_check();
1903   bool use_length = x->length() != nullptr;
1904   bool obj_store = is_reference_type(x->elt_type());
1905   bool needs_store_check = obj_store && !(is_loaded_flat_array && x->is_exact_flat_array_store()) &&
1906                                         (x->value()->as_Constant() == nullptr ||
1907                                          !get_jobject_constant(x->value())->is_null_object());
1908 
1909   LIRItem array(x->array(), this);
1910   LIRItem index(x->index(), this);
1911   LIRItem value(x->value(), this);
1912   LIRItem length(this);
1913 
1914   array.load_item();
1915   index.load_nonconstant();
1916 
1917   if (use_length && needs_range_check) {
1918     length.set_instruction(x->length());
1919     length.load_item();

1920   }
1921 
1922   if (needs_store_check || x->check_boolean()
1923       || is_loaded_flat_array || needs_flat_array_store_check(x) || needs_null_free_array_store_check(x)) {
1924     value.load_item();
1925   } else {
1926     value.load_for_store(x->elt_type());
1927   }
1928 
1929   set_no_result(x);
1930 
1931   // the CodeEmitInfo must be duplicated for each different
1932   // LIR-instruction because spilling can occur anywhere between two
1933   // instructions and so the debug information must be different
1934   CodeEmitInfo* range_check_info = state_for(x);
1935   CodeEmitInfo* null_check_info = nullptr;
1936   if (x->needs_null_check()) {
1937     null_check_info = new CodeEmitInfo(range_check_info);
1938   }
1939 
1940   if (needs_range_check) {
1941     if (use_length) {
1942       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1943       __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
1944     } else {
1945       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1946       // range_check also does the null check
1947       null_check_info = nullptr;
1948     }
1949   }
1950 
1951   if (GenerateArrayStoreCheck && needs_store_check) {
1952     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1953     array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1954   }
1955 
1956   if (x->should_profile()) {
1957     if (is_loaded_flat_array) {
1958       // No need to profile a store to a flat array of known type. This can happen if
1959       // the type only became known after optimizations (for example, after the PhiSimplifier).
1960       x->set_should_profile(false);
1961     } else {
1962       int bci = x->profiled_bci();
1963       ciMethodData* md = x->profiled_method()->method_data();
1964       assert(md != nullptr, "Sanity");
1965       ciProfileData* data = md->bci_to_data(bci);
1966       assert(data != nullptr && data->is_ArrayStoreData(), "incorrect profiling entry");
1967       ciArrayStoreData* store_data = (ciArrayStoreData*)data;
1968       profile_array_type(x, md, store_data);
1969       assert(store_data->is_ArrayStoreData(), "incorrect profiling entry");
1970       if (x->array()->maybe_null_free_array()) {
1971         profile_null_free_array(array, md, data);
1972       }
1973     }
1974   }
1975 
1976   if (is_loaded_flat_array) {
1977     ciFlatArrayKlass* array_klass = x->array()->declared_type()->as_flat_array_klass();
1978     ciInlineKlass* elem_klass = array_klass->element_klass()->as_inline_klass();
1979     bool null_free = array_klass->is_elem_null_free();
1980     if (null_free && !x->value()->is_null_free()) {
1981       __ null_check(value.result(), new CodeEmitInfo(range_check_info));
1982     }
1983     // If the array element is an empty null-free inline type, there is no need to copy anything.
1984     // Nullable empty arrays still need their null marker updated.
1985     if (!elem_klass->is_empty() || !null_free) {
1986       access_flat_array(false, array, index, value);
1987     }
1988   } else {
1989     StoreFlattenedArrayStub* slow_path = nullptr;
1990 
1991     if (needs_flat_array_store_check(x)) {
1992       // Check if we indeed have a flat array
1993       index.load_item();
1994       slow_path = new StoreFlattenedArrayStub(array.result(), index.result(), value.result(), state_for(x, x->state_before()));
1995       check_flat_array(array.result(), slow_path);
1996       set_in_conditional_code(true);
1997     }
1998 
1999     if (needs_null_free_array_store_check(x)) {
2000       CodeEmitInfo* info = new CodeEmitInfo(range_check_info);
2001       check_null_free_array(array, value, info);
2002     }
2003 
2004     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
2005     if (x->check_boolean()) {
2006       decorators |= C1_MASK_BOOLEAN;
2007     }
2008 
2009     access_store_at(decorators, x->elt_type(), array, index.result(), value.result(), nullptr, null_check_info);
2010     if (slow_path != nullptr) {
2011       __ branch_destination(slow_path->continuation());
2012       set_in_conditional_code(false);
2013     }
2014   }
2015 }
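// Illustrative flow for a maybe-flat object store (sketch): range check,
// then store check, then check_flat_array() branches to the
// StoreFlattenedArrayStub runtime call when the receiver turns out to be
// flat, while the fast path performs a normal oop store via access_store_at().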
2016 
2017 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
2018                                   LIRItem& base, LIR_Opr offset, LIR_Opr result,
2019                                   CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
2020   decorators |= ACCESS_READ;
2021   LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
2022   if (access.is_raw()) {
2023     _barrier_set->BarrierSetC1::load_at(access, result);
2024   } else {
2025     _barrier_set->load_at(access, result);
2026   }
2027 }
2028 
2029 void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
2030                                LIR_Opr addr, LIR_Opr result) {
2031   decorators |= ACCESS_READ;
2032   LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
2033   access.set_resolved_addr(addr);
2034   if (access.is_raw()) {
2035     _barrier_set->BarrierSetC1::load(access, result);
2036   } else {
2037     _barrier_set->load(access, result);
2038   }
2039 }
2040 
2041 void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type,
2042                                    LIRItem& base, LIR_Opr offset, LIR_Opr value,
2043                                    CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info,
2044                                    ciInlineKlass* vk) {
2045   decorators |= ACCESS_WRITE;
2046   LIRAccess access(this, decorators, base, offset, type, patch_info, store_emit_info, vk);
2047   if (access.is_raw()) {
2048     _barrier_set->BarrierSetC1::store_at(access, value);
2049   } else {
2050     _barrier_set->store_at(access, value);
2051   }
2052 }
2053 
2054 LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
2055                                                LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
2056   decorators |= ACCESS_READ;
2057   decorators |= ACCESS_WRITE;
2058   // Atomic operations are SEQ_CST by default
2059   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
2060   LIRAccess access(this, decorators, base, offset, type);
2061   if (access.is_raw()) {
2062     return _barrier_set->BarrierSetC1::atomic_cmpxchg_at(access, cmp_value, new_value);
2063   } else {
2064     return _barrier_set->atomic_cmpxchg_at(access, cmp_value, new_value);
2065   }
2066 }

2077   } else {
2078     return _barrier_set->atomic_xchg_at(access, value);
2079   }
2080 }
2081 
2082 LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type,
2083                                            LIRItem& base, LIRItem& offset, LIRItem& value) {
2084   decorators |= ACCESS_READ;
2085   decorators |= ACCESS_WRITE;
2086   // Atomic operations are SEQ_CST by default
2087   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
2088   LIRAccess access(this, decorators, base, offset, type);
2089   if (access.is_raw()) {
2090     return _barrier_set->BarrierSetC1::atomic_add_at(access, value);
2091   } else {
2092     return _barrier_set->atomic_add_at(access, value);
2093   }
2094 }
2095 
2096 void LIRGenerator::do_LoadField(LoadField* x) {
2097   ciField* field = x->field();
2098   bool needs_patching = x->needs_patching();
2099   bool is_volatile = field->is_volatile();
2100   BasicType field_type = x->field_type();
2101 
2102   CodeEmitInfo* info = nullptr;
2103   if (needs_patching) {
2104     assert(x->explicit_null_check() == nullptr, "can't fold null check into patching field access");
2105     info = state_for(x, x->state_before());
2106   } else if (x->needs_null_check()) {
2107     NullCheck* nc = x->explicit_null_check();
2108     if (nc == nullptr) {
2109       info = state_for(x);
2110     } else {
2111       info = state_for(nc);
2112     }
2113   }
2114 
2115   LIRItem object(x->obj(), this);
2116 
2117   object.load_item();
2118 
2119 #ifndef PRODUCT

2130        stress_deopt)) {
2131     LIR_Opr obj = object.result();
2132     if (stress_deopt) {
2133       obj = new_register(T_OBJECT);
2134       __ move(LIR_OprFact::oopConst(nullptr), obj);
2135     }
2136     // Emit an explicit null check because the offset is too large.
2137     // If the class is not loaded and the object is null, we need to deoptimize to throw a
2138     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
2139     __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
2140   }
2141 
2142   DecoratorSet decorators = IN_HEAP;
2143   if (is_volatile) {
2144     decorators |= MO_SEQ_CST;
2145   }
2146   if (needs_patching) {
2147     decorators |= C1_NEEDS_PATCHING;
2148   }
2149 
2150   if (field->is_flat()) {
2151     ciInlineKlass* vk = field->type()->as_inline_klass();
2152 #ifdef ASSERT
2153     assert(field->is_atomic(), "No atomic access required");
2154     assert(x->state_before() != nullptr, "Needs state before");
2155 #endif
2156 
2157     // Allocate buffer (we can't easily do this conditionally on the null check below
2158     // because branches added in the LIR are opaque to the register allocator).
2159     NewInstance* buffer = new NewInstance(vk, x->state_before(), false, true);
2160     do_NewInstance(buffer);
2161     LIRItem dest(buffer, this);
2162 
2163     // Copy the payload to the buffer
2164     BasicType bt = vk->atomic_size_to_basic_type(field->is_null_free());
2165     LIR_Opr payload = new_register((bt == T_LONG) ? bt : T_INT);
2166     access_load_at(decorators, bt, object, LIR_OprFact::intConst(field->offset_in_bytes()), payload,
2167                    // Make sure to emit an implicit null check
2168                    info ? new CodeEmitInfo(info) : nullptr, info);
2169     access_store_at(decorators, bt, dest, LIR_OprFact::intConst(vk->payload_offset()), payload);
2170 
2171     if (field->is_null_free()) {
2172       set_result(x, buffer->operand());
2173     } else {
2174       // Check the null marker and set result to null if it's not set
2175       __ logical_and(payload, null_marker_mask(bt, field), payload);
2176       __ cmp(lir_cond_equal, payload, (bt == T_LONG) ? LIR_OprFact::longConst(0) : LIR_OprFact::intConst(0));
2177       __ cmove(lir_cond_equal, LIR_OprFact::oopConst(nullptr), buffer->operand(), rlock_result(x), T_OBJECT);
2178     }
2179 
2180     // Ensure the copy is visible before any subsequent store that publishes the buffer.
2181     __ membar_storestore();
2182     return;
2183   }
2184 
2185   LIR_Opr result = rlock_result(x, field_type);
2186   access_load_at(decorators, field_type,
2187                  object, LIR_OprFact::intConst(x->offset()), result,
2188                  info ? new CodeEmitInfo(info) : nullptr, info);
2189 }
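// Illustrative decoding of the nullable flat-field load above:
//   is_null = (payload & null_marker_mask) == 0;
//   result  = is_null ? nullptr : buffer;
// which is the logical_and / cmp / cmove sequence emitted for fields that
// are not null-free.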
2190 
2191 // int/long jdk.internal.util.Preconditions.checkIndex
2192 void LIRGenerator::do_PreconditionsCheckIndex(Intrinsic* x, BasicType type) {
2193   assert(x->number_of_arguments() == 3, "wrong type");
2194   LIRItem index(x->argument_at(0), this);
2195   LIRItem length(x->argument_at(1), this);
2196   LIRItem oobef(x->argument_at(2), this);
2197 
2198   index.load_item();
2199   length.load_item();
2200   oobef.load_item();
2201 
2202   LIR_Opr result = rlock_result(x);
2203   // x->state() is created from copy_state_for_exception and does not contain the arguments;
2204   // we must prepare them before entering interpreter mode on deoptimization.

2313       __ move(LIR_OprFact::oopConst(nullptr), obj);
2314       __ null_check(obj, new CodeEmitInfo(null_check_info));
2315     }
2316   }
2317 
2318   if (needs_range_check) {
2319     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
2320       __ branch(lir_cond_always, new RangeCheckStub(range_check_info, index.result(), array.result()));
2321     } else if (use_length) {
2322       // TODO: use a (modified) version of array_range_check that does not require a
2323       //       constant length to be loaded to a register
2324       __ cmp(lir_cond_belowEqual, length.result(), index.result());
2325       __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
2326     } else {
2327       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
2328       // The range check performs the null check, so clear it out for the load
2329       null_check_info = nullptr;
2330     }
2331   }
2332 
2333   ciMethodData* md = nullptr;
2334   ciProfileData* data = nullptr;
2335   if (x->should_profile()) {
2336     if (x->array()->is_loaded_flat_array()) {
2337       // No need to profile a load from a flat array of known type. This can happen if
2338       // the type only became known after optimizations (for example, after the PhiSimplifier).
2339       x->set_should_profile(false);
2340     } else {
2341       int bci = x->profiled_bci();
2342       md = x->profiled_method()->method_data();
2343       assert(md != nullptr, "Sanity");
2344       data = md->bci_to_data(bci);
2345       assert(data != nullptr && data->is_ArrayLoadData(), "incorrect profiling entry");
2346       ciArrayLoadData* load_data = (ciArrayLoadData*)data;
2347       profile_array_type(x, md, load_data);
2348     }
2349   }
2350 
2351   Value element = nullptr;
2352   if (x->buffer() != nullptr) {
2353     assert(x->array()->is_loaded_flat_array(), "must be");
2354     // Find the destination address (of the NewInlineTypeInstance).
2355     LIRItem buffer(x->buffer(), this);
2356     LIR_Opr payload = access_flat_array(true, array, index, buffer,
2357                                         x->delayed() == nullptr ? nullptr : x->delayed()->field(),
2358                                         x->delayed() == nullptr ? 0 : x->delayed()->offset());
2359     ciFlatArrayKlass* array_klass = x->array()->declared_type()->as_flat_array_klass();
2360     if (array_klass->is_elem_null_free()) {
2361       set_result(x, x->buffer()->operand());
2362     } else {
2363       // Check the null marker and set result to null if it's not set
2364       ciInlineKlass* elem_klass = array_klass->element_klass()->as_inline_klass();
2365       BasicType bt = elem_klass->atomic_size_to_basic_type(false);
2366       assert(payload->is_valid(), "nullable flat array load must return the atomic payload");
2367       __ logical_and(payload, null_marker_mask(bt, elem_klass->null_marker_offset_in_payload()), payload);
2368       __ cmp(lir_cond_equal, payload, (bt == T_LONG) ? LIR_OprFact::longConst(0) : LIR_OprFact::intConst(0));
2369       __ cmove(lir_cond_equal, LIR_OprFact::oopConst(nullptr), buffer.result(), rlock_result(x), T_OBJECT);
2370     }
2371   } else if (x->delayed() != nullptr) {
2372     assert(x->array()->is_loaded_flat_array(), "must be");
2373     LIR_Opr result = rlock_result(x, x->delayed()->field()->type()->basic_type());
2374     access_sub_element(array, index, result, x->delayed()->field(), x->delayed()->offset());
2375   } else {
2376     LIR_Opr result = rlock_result(x, x->elt_type());
2377     LoadFlattenedArrayStub* slow_path = nullptr;
2378 
2379     if (x->should_profile() && x->array()->maybe_null_free_array()) {
2380       profile_null_free_array(array, md, data);
2381     }
2382 
2383     if (x->elt_type() == T_OBJECT && x->array()->maybe_flat_array()) {
2384       assert(x->delayed() == nullptr, "Delayed LoadIndexed only applies to loaded_flat_arrays");
2385       index.load_item();
2386       // if we are loading from a flat array, load it using a runtime call
2387       slow_path = new LoadFlattenedArrayStub(array.result(), index.result(), result, state_for(x, x->state_before()));
2388       check_flat_array(array.result(), slow_path);
2389       set_in_conditional_code(true);
2390     }
2391 
2392     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
2393     access_load_at(decorators, x->elt_type(),
2394                    array, index.result(), result,
2395                    nullptr, null_check_info);
2396 
2397     if (slow_path != nullptr) {
2398       __ branch_destination(slow_path->continuation());
2399       set_in_conditional_code(false);
2400     }
2401 
2402     element = x;
2403   }
2404 
2405   if (x->should_profile()) {
2406     profile_element_type(element, md, (ciArrayLoadData*)data);
2407   }
2408 }
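// Illustrative flow for the maybe-flat array load above (sketch):
// check_flat_array() branches to the LoadFlattenedArrayStub runtime call when
// the receiver turns out to be flat; otherwise a plain element load is
// emitted, and the loaded element type is profiled afterwards.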
2409 
2410 
2411 void LIRGenerator::do_NullCheck(NullCheck* x) {
2412   if (x->can_trap()) {
2413     LIRItem value(x->obj(), this);
2414     value.load_item();
2415     CodeEmitInfo* info = state_for(x);
2416     __ null_check(value.result(), info);
2417   }
2418 }
2419 
2420 
2421 void LIRGenerator::do_TypeCast(TypeCast* x) {
2422   LIRItem value(x->obj(), this);
2423   value.load_item();
2424   // the result is the same as from the node we are casting
2425   set_result(x, value.result());
2426 }
2427 

2870   Compilation* comp = Compilation::current();
2871   if (do_update) {
2872     // try to find exact type, using CHA if possible, so that loading
2873     // the klass from the object can be avoided
2874     ciType* type = obj->exact_type();
2875     if (type == nullptr) {
2876       type = obj->declared_type();
2877       type = comp->cha_exact_type(type);
2878     }
2879     assert(type == nullptr || type->is_klass(), "type should be class");
2880     exact_klass = (type != nullptr && type->is_loaded()) ? (ciKlass*)type : nullptr;
2881 
2882     do_update = exact_klass == nullptr || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2883   }
2884 
2885   if (!do_null && !do_update) {
2886     return result;
2887   }
2888 
2889   ciKlass* exact_signature_k = nullptr;
2890   if (do_update && signature_at_call_k != nullptr) {
2891     // Is the type from the signature exact (the only one possible)?
2892     exact_signature_k = signature_at_call_k->exact_klass();
2893     if (exact_signature_k == nullptr) {
2894       exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2895     } else {
2896       result = exact_signature_k;
2897       // Known statically. No need to emit any code: prevent
2898       // LIR_Assembler::emit_profile_type() from emitting useless code
2899       profiled_k = ciTypeEntries::with_status(result, profiled_k);
2900     }
2901     // exact_klass and exact_signature_k can both be non-null but
2902     // different if exact_klass is loaded after the ciObject for
2903     // exact_signature_k is created.
2904     if (exact_klass == nullptr && exact_signature_k != nullptr && exact_klass != exact_signature_k) {
2905       // sometimes the type of the signature is better than the best type
2906       // the compiler has
2907       exact_klass = exact_signature_k;
2908     }
2909     if (callee_signature_k != nullptr &&
2910         callee_signature_k != signature_at_call_k) {
2911       ciKlass* improved_klass = callee_signature_k->exact_klass();
2912       if (improved_klass == nullptr) {
2913         improved_klass = comp->cha_exact_type(callee_signature_k);
2914       }
2915       if (exact_klass == nullptr && improved_klass != nullptr && exact_klass != improved_klass) {
2916         exact_klass = exact_signature_k;
2917       }
2918     }
2919     do_update = exact_klass == nullptr || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2920   }
2921 
2922   if (exact_klass != nullptr && exact_klass->is_obj_array_klass()) {
2923     ciArrayKlass* exact_array_klass = exact_klass->as_array_klass();
2924     if (exact_array_klass->is_refined()) {
2925       do_update = ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2926     } else if (exact_klass->can_be_inline_array_klass()) {
2927       // Inline type arrays can have additional properties. Load the klass unless
2928       // the C1 type already carries refined array properties.
2929       exact_klass = nullptr;
2930       do_update = true;
2931     } else {
2932       // For a direct pointer comparison, we need the refined array klass pointer
2933       exact_klass = ciObjArrayKlass::make(exact_array_klass->element_klass());
2934       do_update = ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2935     }
2936   }
2937   if (!do_null && !do_update) {
2938     return result;
2939   }
2940 
2941   if (mdp == LIR_OprFact::illegalOpr) {
2942     mdp = new_register(T_METADATA);
2943     __ metadata2reg(md->constant_encoding(), mdp);
2944     if (md_base_offset != 0) {
2945       LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
2946       mdp = new_pointer_register();
2947       __ leal(LIR_OprFact::address(base_type_address), mdp);
2948     }
2949   }
2950   LIRItem value(obj, this);
2951   value.load_item();
2952   __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
2953                   value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != nullptr);
2954   return result;
2955 }
2956 

2970         assert(!src->is_illegal(), "check");
2971         BasicType t = src->type();
2972         if (is_reference_type(t)) {
2973           intptr_t profiled_k = parameters->type(j);
2974           Local* local = x->state()->local_at(java_index)->as_Local();
2975           ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
2976                                         in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
2977                                         profiled_k, local, mdp, false, local->declared_type()->as_klass(), nullptr);
2978           // If the profile is known statically, set it once and for all and do not emit any code
2979           if (exact != nullptr) {
2980             md->set_parameter_type(j, exact);
2981           }
2982           j++;
2983         }
2984         java_index += type2size[t];
2985       }
2986     }
2987   }
2988 }
2989 
2990 void LIRGenerator::profile_flags(ciMethodData* md, ciProfileData* data, int flag, LIR_Condition condition) {
2991   assert(md != nullptr && data != nullptr, "should have been initialized");
2992   LIR_Opr mdp = new_register(T_METADATA);
2993   __ metadata2reg(md->constant_encoding(), mdp);
2994   LIR_Address* addr = new LIR_Address(mdp, md->byte_offset_of_slot(data, DataLayout::flags_offset()), T_BYTE);
2995   LIR_Opr flags = new_register(T_INT);
2996   __ move(addr, flags);
2997   LIR_Opr update;
2998   if (condition != lir_cond_always) {
2999     update = new_register(T_INT);
3000     __ cmove(condition, LIR_OprFact::intConst(0), LIR_OprFact::intConst(flag), update, T_INT);
3001   } else {
3002     update = LIR_OprFact::intConst(flag);
3003   }
3004   __ logical_or(flags, update, flags);
3005   __ store(flags, addr);
3006 }
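// Sketch of the emitted update (illustrative):
//   flags  = mdo_flags_byte;
//   update = condition_holds ? 0 : flag;   // cmove selects src1 on a match
//   flags |= update;
// i.e. the flag is only set when the preceding compare did not take
// `condition`; with lir_cond_always it is set unconditionally.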
3007 
3008 void LIRGenerator::profile_null_free_array(LIRItem array, ciMethodData* md, ciProfileData* data) {
3009   assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
3011   LIR_Opr tmp = new_register(T_METADATA);
3012   __ check_null_free_array(array.result(), tmp);
3013 
3014   profile_flags(md, data, ArrayStoreData::null_free_array_byte_constant(), lir_cond_equal);
3015 }
3016 
3017 template <class ArrayData> void LIRGenerator::profile_array_type(AccessIndexed* x, ciMethodData*& md, ArrayData*& load_store) {
3018   assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
3019   LIR_Opr mdp = LIR_OprFact::illegalOpr;
3020   profile_type(md, md->byte_offset_of_slot(load_store, ArrayData::array_offset()), 0,
3021                load_store->array()->type(), x->array(), mdp, true, nullptr, nullptr);
3022 }
3023 
3024 void LIRGenerator::profile_element_type(Value element, ciMethodData* md, ciArrayLoadData* load_data) {
3025   assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
3026   assert(md != nullptr && load_data != nullptr, "should have been initialized");
3027   LIR_Opr mdp = LIR_OprFact::illegalOpr;
3028   profile_type(md, md->byte_offset_of_slot(load_data, ArrayLoadData::element_offset()), 0,
3029                load_data->element()->type(), element, mdp, false, nullptr, nullptr);
3030 }
3031 
3032 void LIRGenerator::do_Base(Base* x) {
3033   __ std_entry(LIR_OprFact::illegalOpr);
3034   // Emit moves from physical registers / stack slots to virtual registers
3035   CallingConvention* args = compilation()->frame_map()->incoming_arguments();
3036   IRScope* irScope = compilation()->hir()->top_scope();
3037   int java_index = 0;
3038   for (int i = 0; i < args->length(); i++) {
3039     LIR_Opr src = args->at(i);
3040     assert(!src->is_illegal(), "check");
3041     BasicType t = src->type();
3042 
3043     // Types which are smaller than int are passed as int, so
3044     // correct the type that was passed.
3045     switch (t) {
3046     case T_BYTE:
3047     case T_BOOLEAN:
3048     case T_SHORT:
3049     case T_CHAR:
3050       t = T_INT;
3051       break;

3053       break;
3054     }
3055 
3056     LIR_Opr dest = new_register(t);
3057     __ move(src, dest);
3058 
3059     // Assign new location to Local instruction for this local
3060     Local* local = x->state()->local_at(java_index)->as_Local();
3061     assert(local != nullptr, "Locals for incoming arguments must have been created");
3062 #ifndef __SOFTFP__
3063     // The java calling convention passes double as long and float as int.
3064     assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
3065 #endif // __SOFTFP__
3066     local->set_operand(dest);
3067 #ifdef ASSERT
3068     _instruction_for_operand.at_put_grow(dest->vreg_number(), local, nullptr);
3069 #endif
3070     java_index += type2size[t];
3071   }
3072 
3073   // Check if we need a membar at the beginning of the java.lang.Object
3074   // constructor to satisfy the memory model for strict fields.
3075   if (Arguments::is_valhalla_enabled() && method()->intrinsic_id() == vmIntrinsics::_Object_init) {
3076     __ membar_storestore();
3077   }
3078 
3079   if (compilation()->env()->dtrace_method_probes()) {
3080     BasicTypeList signature;
3081     signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
3082     signature.append(T_METADATA); // Method*
3083     LIR_OprList* args = new LIR_OprList();
3084     args->append(getThreadPointer());
3085     LIR_Opr meth = new_register(T_METADATA);
3086     __ metadata2reg(method()->constant_encoding(), meth);
3087     args->append(meth);
3088     call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, nullptr);
3089   }
3090 
3091   if (method()->is_synchronized()) {
3092     LIR_Opr obj;
3093     if (method()->is_static()) {
3094       obj = new_register(T_OBJECT);
3095       __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
3096     } else {
3097       Local* receiver = x->state()->local_at(0)->as_Local();
3098       assert(receiver != nullptr, "must already exist");

3100     }
3101     assert(obj->is_valid(), "must be valid");
3102 
3103     if (method()->is_synchronized()) {
3104       LIR_Opr lock = syncLockOpr();
3105       __ load_stack_address_monitor(0, lock);
3106 
3107       CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, x->check_flag(Instruction::DeoptimizeOnException));
3108       CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
3109 
3110       // The receiver is guaranteed non-null, so we don't need a CodeEmitInfo.
3111       __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, nullptr);
3112     }
3113   }
3114   // increment invocation counters if needed
3115   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
3116     profile_parameters(x);
3117     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, false);
3118     increment_invocation_counter(info);
3119   }
3120   if (method()->has_scalarized_args()) {
3121     // Check if deoptimization was triggered (i.e. orig_pc was set) while buffering scalarized inline type arguments
3122     // in the entry point (see comments in frame::deoptimize). If so, deoptimize only now that we have the right state.
3123     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), nullptr, false);
3124     CodeStub* deopt_stub = new DeoptimizeStub(info, Deoptimization::Reason_none, Deoptimization::Action_none);
3125     __ append(new LIR_Op0(lir_check_orig_pc));
3126     __ branch(lir_cond_notEqual, deopt_stub);
3127   }
3128 
3129   // all blocks with a successor must end with an unconditional jump
3130   // to the successor even if they are consecutive
3131   __ jump(x->default_sux());
3132 }
3133 
3134 
3135 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
3136   // construct our frame and model the production of incoming pointer
3137   // to the OSR buffer.
3138   __ osr_entry(LIR_Assembler::osrBufferPointer());
3139   LIR_Opr result = rlock_result(x);
3140   __ move(LIR_Assembler::osrBufferPointer(), result);
3141 }
3142 
3143 void LIRGenerator::invoke_load_one_argument(LIRItem* param, LIR_Opr loc) {
3144   if (loc->is_register()) {
3145     param->load_item_force(loc);
3146   } else {
3147     LIR_Address* addr = loc->as_address_ptr();
3148     param->load_for_store(addr->type());
3149     if (addr->type() == T_OBJECT) {
3150       __ move_wide(param->result(), addr);
3151     } else {
3152       __ move(param->result(), addr);
3153     }
3154   }
3155 }
3156 
3157 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
3158   assert(args->length() == arg_list->length(),
3159          "args=%d, arg_list=%d", args->length(), arg_list->length());
3160   for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
3161     LIRItem* param = args->at(i);
3162     LIR_Opr loc = arg_list->at(i);
3163     invoke_load_one_argument(param, loc);









3164   }
3165 
3166   if (x->has_receiver()) {
3167     LIRItem* receiver = args->at(0);
3168     LIR_Opr loc = arg_list->at(0);
3169     if (loc->is_register()) {
3170       receiver->load_item_force(loc);
3171     } else {
3172       assert(loc->is_address(), "just checking");
3173       receiver->load_for_store(T_OBJECT);
3174       __ move_wide(receiver->result(), loc->as_address_ptr());
3175     }
3176   }
3177 }
3178 
3179 
3180 // Visits all arguments, returns appropriate items without loading them
3181 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
3182   LIRItemList* argument_items = new LIRItemList();
3183   if (x->has_receiver()) {

3290   __ move(tmp, reg);
3291 }
3292 
3293 
3294 
3295 // Code for:  x->x() {x->cond()} x->y() ? x->tval() : x->fval()
3296 void LIRGenerator::do_IfOp(IfOp* x) {
3297 #ifdef ASSERT
3298   {
3299     ValueTag xtag = x->x()->type()->tag();
3300     ValueTag ttag = x->tval()->type()->tag();
3301     assert(xtag == intTag || xtag == objectTag, "cannot handle others");
3302     assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
3303     assert(ttag == x->fval()->type()->tag(), "cannot handle others");
3304   }
3305 #endif
3306 
3307   LIRItem left(x->x(), this);
3308   LIRItem right(x->y(), this);
3309   left.load_item();
3310   if (can_inline_as_constant(right.value()) && !x->substitutability_check()) {
3311     right.dont_load_item();
3312   } else {
3313     // substitutability_check() needs to use right as a base register.
3314     right.load_item();
3315   }
3316 
3317   LIRItem t_val(x->tval(), this);
3318   LIRItem f_val(x->fval(), this);
3319   t_val.dont_load_item();
3320   f_val.dont_load_item();

3321 
3322   if (x->substitutability_check()) {
3323     substitutability_check(x, left, right, t_val, f_val);
3324   } else {
3325     LIR_Opr reg = rlock_result(x);
3326     __ cmp(lir_cond(x->cond()), left.result(), right.result());
3327     __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
3328   }
3329 }
3330 
3331 void LIRGenerator::substitutability_check(IfOp* x, LIRItem& left, LIRItem& right, LIRItem& t_val, LIRItem& f_val) {
3332   assert(x->cond() == If::eql || x->cond() == If::neq, "must be");
3333   bool is_acmpeq = (x->cond() == If::eql);
3334   LIR_Opr equal_result     = is_acmpeq ? t_val.result() : f_val.result();
3335   LIR_Opr not_equal_result = is_acmpeq ? f_val.result() : t_val.result();
3336   LIR_Opr result = rlock_result(x);
3337   CodeEmitInfo* info = state_for(x, x->state_before());
3338 
3339   substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info);
3340 }
3341 
3342 void LIRGenerator::substitutability_check(If* x, LIRItem& left, LIRItem& right) {
3343   LIR_Opr equal_result     = LIR_OprFact::intConst(1);
3344   LIR_Opr not_equal_result = LIR_OprFact::intConst(0);
3345   LIR_Opr result = new_register(T_INT);
3346   CodeEmitInfo* info = state_for(x, x->state_before());
3347 
3348   substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info);
3349 
3350   assert(x->cond() == If::eql || x->cond() == If::neq, "must be");
3351   __ cmp(lir_cond(x->cond()), result, equal_result);
3352 }
3353 
3354 void LIRGenerator::substitutability_check_common(Value left_val, Value right_val, LIRItem& left, LIRItem& right,
3355                                                  LIR_Opr equal_result, LIR_Opr not_equal_result, LIR_Opr result,
3356                                                  CodeEmitInfo* info) {
3357   LIR_Opr tmp1 = LIR_OprFact::illegalOpr;
3358   LIR_Opr tmp2 = LIR_OprFact::illegalOpr;
3359 
3360   ciKlass* left_klass = left_val->as_loaded_klass_or_null();
3361   ciKlass* right_klass = right_val->as_loaded_klass_or_null();
3362   if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) {
3363     // No need to load klass -- the operands are statically known to be the same inline klass.
3364   } else {
3365     BasicType t_klass = UseCompressedOops ? T_INT : T_METADATA;
3366     tmp1 = new_register(t_klass);
3367     tmp2 = new_register(t_klass);
3368   }
3369 
3370   CodeStub* slow_path = new SubstitutabilityCheckStub(left.result(), right.result(), info);
3371   __ substitutability_check(result, left.result(), right.result(), equal_result, not_equal_result,
3372                             left_klass, right_klass, tmp1, tmp2, info, slow_path);
3373 }
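// Illustrative semantics (hypothetical Java example): for value objects,
// acmp is a substitutability check rather than a reference comparison, so
//   new Point(1, 2) == new Point(1, 2)   // true: same klass, equal fields
// The inline fast path compares klasses and fields; SubstitutabilityCheckStub
// handles the general case via a runtime call.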
3374 
3375 void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {
3376   assert(x->number_of_arguments() == 0, "wrong type");
3377   // Enforce computation of _reserved_argument_area_size which is required on some platforms.
3378   BasicTypeList signature;
3379   CallingConvention* cc = frame_map()->c_calling_convention(&signature);
3380   LIR_Opr reg = result_register_for(x->type());
3381   __ call_runtime_leaf(routine, getThreadTemp(),
3382                        reg, new LIR_OprList());
3383   LIR_Opr result = rlock_result(x);
3384   __ move(reg, result);
3385 }
3386 
3387 
3388 
3389 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
3390   switch (x->id()) {
3391   case vmIntrinsics::_intBitsToFloat      :
3392   case vmIntrinsics::_doubleToRawLongBits :

3627   if (x->recv() != nullptr || x->nb_profiled_args() > 0) {
3628     profile_parameters_at_call(x);
3629   }
3630 
3631   if (x->recv() != nullptr) {
3632     LIRItem value(x->recv(), this);
3633     value.load_item();
3634     recv = new_register(T_OBJECT);
3635     __ move(value.result(), recv);
3636   }
3637   __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3638 }
3639 
3640 void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
3641   int bci = x->bci_of_invoke();
3642   ciMethodData* md = x->method()->method_data_or_null();
3643   assert(md != nullptr, "Sanity");
3644   ciProfileData* data = md->bci_to_data(bci);
3645   if (data != nullptr) {
3646     assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
3647     ciSingleTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
3648     LIR_Opr mdp = LIR_OprFact::illegalOpr;
3649 
3650     bool ignored_will_link;
3651     ciSignature* signature_at_call = nullptr;
3652     x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3653 
3654     // The offset within the MDO of the entry to update may be too large
3655     // to be used in load/store instructions on some platforms. So have
3656     // profile_type() compute the address of the profile in a register.
3657     ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
3658         ret->type(), x->ret(), mdp,
3659         !x->needs_null_check(),
3660         signature_at_call->return_type()->as_klass(),
3661         x->callee()->signature()->return_type()->as_klass());
3662     if (exact != nullptr) {
3663       md->set_return_type(bci, exact);
3664     }
3665   }
3666 }
3667 
3668 bool LIRGenerator::profile_inline_klass(ciMethodData* md, ciProfileData* data, Value value, int flag) {
3669   ciKlass* klass = value->as_loaded_klass_or_null();
3670   if (klass != nullptr) {
3671     if (klass->is_inlinetype()) {
3672       profile_flags(md, data, flag, lir_cond_always);
3673     } else if (klass->can_be_inline_klass()) {
3674       return false;
3675     }
3676   } else {
3677     return false;
3678   }
3679   return true;
3680 }
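// Illustrative outcomes of profile_inline_klass() (sketch):
//   klass statically an inline type     -> flag recorded, returns true
//   klass statically an identity class  -> nothing to record, returns true
//   klass unknown or maybe inline       -> returns false; the caller emits a
//                                          runtime profile_inline_type check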
3681 
3682 
3683 void LIRGenerator::do_ProfileACmpTypes(ProfileACmpTypes* x) {
3684   ciMethod* method = x->method();
3685   assert(method != nullptr, "method should be set if branch is profiled");
3686   ciMethodData* md = method->method_data_or_null();
3687   assert(md != nullptr, "Sanity");
3688   ciProfileData* data = md->bci_to_data(x->bci());
3689   assert(data != nullptr, "must have profiling data");
3690   assert(data->is_ACmpData(), "need BranchData for two-way branches");
3691   ciACmpData* acmp = (ciACmpData*)data;
3692   LIR_Opr mdp = LIR_OprFact::illegalOpr;
3693   profile_type(md, md->byte_offset_of_slot(acmp, ACmpData::left_offset()), 0,
3694                acmp->left()->type(), x->left(), mdp, !x->left_maybe_null(), nullptr, nullptr);
3695   int flags_offset = md->byte_offset_of_slot(data, DataLayout::flags_offset());
3696   if (!profile_inline_klass(md, acmp, x->left(), ACmpData::left_inline_type_byte_constant())) {
3697     LIR_Opr mdp = new_register(T_METADATA);
3698     __ metadata2reg(md->constant_encoding(), mdp);
3699     LIRItem value(x->left(), this);
3700     value.load_item();
3701     __ profile_inline_type(new LIR_Address(mdp, flags_offset, T_INT), value.result(), ACmpData::left_inline_type_byte_constant(), new_register(T_INT), !x->left_maybe_null());
3702   }
3703   profile_type(md, md->byte_offset_of_slot(acmp, ACmpData::left_offset()),
3704                in_bytes(ACmpData::right_offset()) - in_bytes(ACmpData::left_offset()),
3705                acmp->right()->type(), x->right(), mdp, !x->right_maybe_null(), nullptr, nullptr);
3706   if (!profile_inline_klass(md, acmp, x->right(), ACmpData::right_inline_type_byte_constant())) {
3707     LIR_Opr mdp = new_register(T_METADATA);
3708     __ metadata2reg(md->constant_encoding(), mdp);
3709     LIRItem value(x->right(), this);
3710     value.load_item();
3711     __ profile_inline_type(new LIR_Address(mdp, flags_offset, T_INT), value.result(), ACmpData::right_inline_type_byte_constant(), new_register(T_INT), !x->right_maybe_null());
3712   }
3713 }
3714 
3715 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
3716   // We can safely ignore accessors here, since C2 will inline them anyway;
3717   // accessors are also always mature.
3718   if (!x->inlinee()->is_accessor()) {
3719     CodeEmitInfo* info = state_for(x, x->state(), true);
3720     // Notify the runtime very infrequently only to take care of counter overflows
3721     int freq_log = Tier23InlineeNotifyFreqLog;
3722     double scale;
3723     if (_method->has_option_value(CompileCommandEnum::CompileThresholdScaling, scale)) {
3724       freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
3725     }
3726     increment_event_counter_impl(info, x->inlinee(), LIR_OprFact::intConst(InvocationCounter::count_increment), right_n_bits(freq_log), InvocationEntryBci, false, true);
3727   }
3728 }
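// Worked example (illustrative): with freq_log == 10, right_n_bits(10) ==
// 0x3ff, so the runtime is notified roughly every 2^10 == 1024 inlinee
// invocations (before any CompileThresholdScaling adjustment).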
3729 
3730 void LIRGenerator::increment_backedge_counter_conditionally(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info, int left_bci, int right_bci, int bci) {
3731   if (compilation()->is_profiling()) {
3732 #if defined(X86) && !defined(_LP64)
3733     // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
3734     LIR_Opr left_copy = new_register(left->type());