src/hotspot/share/c1/c1_LIRGenerator.cpp

Old version:

  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "c1/c1_Compilation.hpp"
  26 #include "c1/c1_Defs.hpp"
  27 #include "c1/c1_FrameMap.hpp"
  28 #include "c1/c1_Instruction.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_LIRGenerator.hpp"
  31 #include "c1/c1_ValueStack.hpp"
  32 #include "ci/ciArrayKlass.hpp"
  33 #include "ci/ciInstance.hpp"
  34 #include "ci/ciObjArray.hpp"
  35 #include "ci/ciUtilities.hpp"
  36 #include "compiler/compilerDefinitions.inline.hpp"
  37 #include "compiler/compilerOracle.hpp"
  38 #include "gc/shared/barrierSet.hpp"
  39 #include "gc/shared/c1/barrierSetC1.hpp"
  40 #include "oops/klass.inline.hpp"
  41 #include "oops/methodCounters.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "runtime/stubRoutines.hpp"
  44 #include "runtime/vm_version.hpp"
  45 #include "utilities/bitMap.inline.hpp"
  46 #include "utilities/macros.hpp"
  47 #include "utilities/powerOfTwo.hpp"
  48 
  49 #ifdef ASSERT
  50 #define __ gen()->lir(__FILE__, __LINE__)->
  51 #else
  52 #define __ gen()->lir()->

 198 }
 199 
 200 
 201 //--------------------------------------------------------------
 202 // LIRItem
 203 
 204 void LIRItem::set_result(LIR_Opr opr) {
 205   assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
 206   value()->set_operand(opr);
 207 
 208 #ifdef ASSERT
 209   if (opr->is_virtual()) {
 210     _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), nullptr);
 211   }
 212 #endif
 213 
 214   _result = opr;
 215 }
 216 
 217 void LIRItem::load_item() {
 218   if (result()->is_illegal()) {
 219     // update the item's result
 220     _result = value()->operand();
 221   }
 222   if (!result()->is_register()) {
 223     LIR_Opr reg = _gen->new_register(value()->type());
 224     __ move(result(), reg);
 225     if (result()->is_constant()) {
 226       _result = reg;
 227     } else {
 228       set_result(reg);
 229     }
 230   }
 231 }
 232 
 233 
 234 void LIRItem::load_for_store(BasicType type) {
 235   if (_gen->can_store_as_constant(value(), type)) {
 236     _result = value()->operand();
 237     if (!_result->is_constant()) {

 605     assert(right_op != result_op, "malformed");
 606     __ move(left_op, result_op);
 607     left_op = result_op;
 608   }
 609 
 610   switch(code) {
 611     case Bytecodes::_iand:
 612     case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;
 613 
 614     case Bytecodes::_ior:
 615     case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;
 616 
 617     case Bytecodes::_ixor:
 618     case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;
 619 
 620     default: ShouldNotReachHere();
 621   }
 622 }
 623 
 624 
 625 void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
 626   if (!GenerateSynchronizationCode) return;
 627   // for slow path, use debug info for state after successful locking
 628   CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
 629   __ load_stack_address_monitor(monitor_no, lock);
 630   // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
 631   __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
 632 }
 633 
 634 
 635 void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
 636   if (!GenerateSynchronizationCode) return;
 637   // setup registers
 638   LIR_Opr hdr = lock;
 639   lock = new_hdr;
 640   CodeStub* slow_path = new MonitorExitStub(lock, LockingMode != LM_MONITOR, monitor_no);
 641   __ load_stack_address_monitor(monitor_no, lock);
 642   __ unlock_object(hdr, object, lock, scratch, slow_path);
 643 }
 644 
 645 #ifndef PRODUCT
 646 void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
 647   if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
 648     tty->print_cr("   ###class not loaded at new bci %d", new_instance->printable_bci());
 649   } else if (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_jvmci() && new_instance->is_unresolved())) {
 650     tty->print_cr("   ###class not resolved at new bci %d", new_instance->printable_bci());
 651   }
 652 }
 653 #endif
 654 
 655 void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
 656   klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
 657   // If klass is not loaded we do not know if the klass has finalizers:
 658   if (UseFastNewInstance && klass->is_loaded()
 659       && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
 660 
 661     C1StubId stub_id = klass->is_initialized() ? C1StubId::fast_new_instance_id : C1StubId::fast_new_instance_init_check_id;
 662 
 663     CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);
 664 
 665     assert(klass->is_loaded(), "must be loaded");
 666     // allocate space for instance
 667     assert(klass->size_helper() > 0, "illegal instance size");
 668     const int instance_size = align_object_size(klass->size_helper());
 669     __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
 670                        oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
 671   } else {
 672     CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, C1StubId::new_instance_id);
 673     __ branch(lir_cond_always, slow_path);
 674     __ branch_destination(slow_path->continuation());
 675   }
 676 }
 677 
 678 
 679 static bool is_constant_zero(Instruction* inst) {
 680   IntConstant* c = inst->type()->as_IntConstant();
 681   if (c) {
 682     return (c->value() == 0);
 683   }
 684   return false;
 685 }
 686 
 687 
 688 static bool positive_constant(Instruction* inst) {
 689   IntConstant* c = inst->type()->as_IntConstant();
 690   if (c) {
 691     return (c->value() >= 0);
 692   }
 693   return false;

 753       if (src_type != nullptr) {
 754         if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
 755           is_exact = true;
 756           expected_type = dst_type;
 757         }
 758       }
 759     }
 760     // at least pass along a good guess
 761     if (expected_type == nullptr) expected_type = dst_exact_type;
 762     if (expected_type == nullptr) expected_type = src_declared_type;
 763     if (expected_type == nullptr) expected_type = dst_declared_type;
 764 
 765     src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
 766     dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
 767   }
 768 
 769   // if a probable array type has been identified, figure out if any
 770   // of the required checks for a fast case can be elided.
 771   int flags = LIR_OpArrayCopy::all_flags;
 772 
 773   if (!src_objarray)
 774     flags &= ~LIR_OpArrayCopy::src_objarray;
 775   if (!dst_objarray)
 776     flags &= ~LIR_OpArrayCopy::dst_objarray;
 777 
 778   if (!x->arg_needs_null_check(0))
 779     flags &= ~LIR_OpArrayCopy::src_null_check;
 780   if (!x->arg_needs_null_check(2))
 781     flags &= ~LIR_OpArrayCopy::dst_null_check;
 782 
 783 
 784   if (expected_type != nullptr) {
 785     Value length_limit = nullptr;
 786 
 787     IfOp* ifop = length->as_IfOp();
 788     if (ifop != nullptr) {
 789       // look for expressions like min(v, a.length), which end up as
 790       //   x > y ? y : x  or  x >= y ? y : x
 791       if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
 792           ifop->x() == ifop->fval() &&

1445   }
1446   return _vreg_flags.at(vreg_num, f);
1447 }
1448 
1449 
1450 // Block local constant handling.  This code is useful for keeping
1451 // unpinned constants and constants which aren't exposed in the IR in
1452 // registers.  Unpinned Constant instructions have their operands
1453 // cleared when the block is finished so that other blocks can't end
1454 // up referring to their registers.
1455 
1456 LIR_Opr LIRGenerator::load_constant(Constant* x) {
1457   assert(!x->is_pinned(), "only for unpinned constants");
1458   _unpinned_constants.append(x);
1459   return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
1460 }
1461 
1462 
1463 LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
1464   BasicType t = c->type();
1465   for (int i = 0; i < _constants.length(); i++) {
1466     LIR_Const* other = _constants.at(i);
1467     if (t == other->type()) {
1468       switch (t) {
1469       case T_INT:
1470       case T_FLOAT:
1471         if (c->as_jint_bits() != other->as_jint_bits()) continue;
1472         break;
1473       case T_LONG:
1474       case T_DOUBLE:
1475         if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
1476         if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
1477         break;
1478       case T_OBJECT:
1479         if (c->as_jobject() != other->as_jobject()) continue;
1480         break;
1481       default:
1482         break;
1483       }
1484       return _reg_for_constants.at(i);
1485     }
1486   }
1487 
1488   LIR_Opr result = new_register(t);
1489   __ move((LIR_Opr)c, result);
1490   _constants.append(c);
1491   _reg_for_constants.append(result);
1492   return result;
1493 }
1494 
1495 //------------------------field access--------------------------------------
1496 
1497 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
1498   assert(x->number_of_arguments() == 4, "wrong type");
1499   LIRItem obj   (x->argument_at(0), this);  // object
1500   LIRItem offset(x->argument_at(1), this);  // offset of field
1501   LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
1502   LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
1503   assert(obj.type()->tag() == objectTag, "invalid type");
1504   assert(cmp.type()->tag() == type->tag(), "invalid type");
1505   assert(val.type()->tag() == type->tag(), "invalid type");
1506 
1507   LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
1508                                             obj, offset, cmp, val);
1509   set_result(x, result);
1510 }
1511 
 1512 // Comment copied from templateTable_i486.cpp
1513 // ----------------------------------------------------------------------------
1514 // Volatile variables demand their effects be made known to all CPU's in
1515 // order.  Store buffers on most chips allow reads & writes to reorder; the
1516 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1517 // memory barrier (i.e., it's not sufficient that the interpreter does not
1518 // reorder volatile references, the hardware also must not reorder them).
1519 //
1520 // According to the new Java Memory Model (JMM):
 1521 // (1) All volatiles are serialized with respect to each other.
1522 // ALSO reads & writes act as acquire & release, so:
1523 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1524 // the read float up to before the read.  It's OK for non-volatile memory refs
1525 // that happen before the volatile read to float down below it.
 1526 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
1527 // that happen BEFORE the write float down to after the write.  It's OK for
1528 // non-volatile memory refs that happen after the volatile write to float up
1529 // before it.
1530 //
1531 // We only put in barriers around volatile refs (they are expensive), not
1532 // _between_ memory refs (that would require us to track the flavor of the
1533 // previous memory refs).  Requirements (2) and (3) require some barriers
1534 // before volatile stores and after volatile loads.  These nearly cover
1535 // requirement (1) but miss the volatile-store-volatile-load case.  This final
1536 // case is placed after volatile-stores although it could just as well go
1537 // before volatile-loads.
1538 
1539 
1540 void LIRGenerator::do_StoreField(StoreField* x) {
1541   bool needs_patching = x->needs_patching();
1542   bool is_volatile = x->field()->is_volatile();
1543   BasicType field_type = x->field_type();
1544 
1545   CodeEmitInfo* info = nullptr;
1546   if (needs_patching) {
1547     assert(x->explicit_null_check() == nullptr, "can't fold null check into patching field access");
1548     info = state_for(x, x->state_before());
1549   } else if (x->needs_null_check()) {
1550     NullCheck* nc = x->explicit_null_check();
1551     if (nc == nullptr) {
1552       info = state_for(x);
1553     } else {
1554       info = state_for(nc);
1555     }
1556   }
1557 
1558   LIRItem object(x->obj(), this);
1559   LIRItem value(x->value(),  this);
1560 
1561   object.load_item();
1562 
1563   if (is_volatile || needs_patching) {
1564     // load item if field is volatile (fewer special cases for volatiles)
1565     // load item if field not initialized
1566     // load item if field not constant
1567     // because of code patching we cannot inline constants
1568     if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1569       value.load_byte_item();
1570     } else  {
1571       value.load_item();
1572     }
1573   } else {
1574     value.load_for_store(field_type);
1575   }
1576 
1577   set_no_result(x);
1578 
1579 #ifndef PRODUCT
1580   if (PrintNotLoaded && needs_patching) {
1581     tty->print_cr("   ###class not loaded at store_%s bci %d",
1582                   x->is_static() ?  "static" : "field", x->printable_bci());
1583   }
1584 #endif
1585 
1586   if (x->needs_null_check() &&
1587       (needs_patching ||
1588        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1589     // Emit an explicit null check because the offset is too large.
1590     // If the class is not loaded and the object is null, we need to deoptimize to throw a
1591     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1592     __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1593   }
1594 
1595   DecoratorSet decorators = IN_HEAP;
1596   if (is_volatile) {
1597     decorators |= MO_SEQ_CST;
1598   }
1599   if (needs_patching) {
1600     decorators |= C1_NEEDS_PATCHING;
1601   }
1602 
1603   access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
1604                   value.result(), info != nullptr ? new CodeEmitInfo(info) : nullptr, info);
1605 }
1606 
1607 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1608   assert(x->is_pinned(),"");
1609   bool needs_range_check = x->compute_needs_range_check();
1610   bool use_length = x->length() != nullptr;
1611   bool obj_store = is_reference_type(x->elt_type());
1612   bool needs_store_check = obj_store && (x->value()->as_Constant() == nullptr ||
1613                                          !get_jobject_constant(x->value())->is_null_object() ||
1614                                          x->should_profile());
1615 
1616   LIRItem array(x->array(), this);
1617   LIRItem index(x->index(), this);
1618   LIRItem value(x->value(), this);
1619   LIRItem length(this);
1620 
1621   array.load_item();
1622   index.load_nonconstant();
1623 
1624   if (use_length && needs_range_check) {
1625     length.set_instruction(x->length());
1626     length.load_item();
1627 
1628   }
1629   if (needs_store_check || x->check_boolean()) {
1630     value.load_item();
1631   } else {
1632     value.load_for_store(x->elt_type());
1633   }
1634 
1635   set_no_result(x);
1636 
1637   // the CodeEmitInfo must be duplicated for each different
1638   // LIR-instruction because spilling can occur anywhere between two
1639   // instructions and so the debug information must be different
1640   CodeEmitInfo* range_check_info = state_for(x);
1641   CodeEmitInfo* null_check_info = nullptr;
1642   if (x->needs_null_check()) {
1643     null_check_info = new CodeEmitInfo(range_check_info);
1644   }
1645 
1646   if (needs_range_check) {
1647     if (use_length) {
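        // Unsigned compare trick: a negative index appears as a very large
        // unsigned value, so the single belowEqual test below catches both
        // index < 0 and index >= length.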
1648       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1649       __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
1650     } else {
1651       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1652       // range_check also does the null check
1653       null_check_info = nullptr;
1654     }
1655   }
1656 
1657   if (GenerateArrayStoreCheck && needs_store_check) {
1658     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1659     array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1660   }
1661 
1662   DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1663   if (x->check_boolean()) {
1664     decorators |= C1_MASK_BOOLEAN;
1665   }
1666 
1667   access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
1668                   nullptr, null_check_info);
1669 }
1670 
1671 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
1672                                   LIRItem& base, LIR_Opr offset, LIR_Opr result,
1673                                   CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
1674   decorators |= ACCESS_READ;
1675   LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
1676   if (access.is_raw()) {
1677     _barrier_set->BarrierSetC1::load_at(access, result);
1678   } else {
1679     _barrier_set->load_at(access, result);
1680   }
1681 }
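 // Note the dispatch pattern in these access helpers: for raw accesses the
 // qualified call _barrier_set->BarrierSetC1::load_at(...) binds statically,
 // bypassing any GC-specific barrier, while the virtual call lets the active
 // barrier set decorate the access.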
1682 
1683 void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
1684                                LIR_Opr addr, LIR_Opr result) {
1685   decorators |= ACCESS_READ;
1686   LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
1687   access.set_resolved_addr(addr);
1688   if (access.is_raw()) {
1689     _barrier_set->BarrierSetC1::load(access, result);
1690   } else {
1691     _barrier_set->load(access, result);
1692   }
1693 }
1694 
1695 void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type,
1696                                    LIRItem& base, LIR_Opr offset, LIR_Opr value,
1697                                    CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
1698   decorators |= ACCESS_WRITE;
1699   LIRAccess access(this, decorators, base, offset, type, patch_info, store_emit_info);
1700   if (access.is_raw()) {
1701     _barrier_set->BarrierSetC1::store_at(access, value);
1702   } else {
1703     _barrier_set->store_at(access, value);
1704   }
1705 }
1706 
1707 LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
1708                                                LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
1709   decorators |= ACCESS_READ;
1710   decorators |= ACCESS_WRITE;
1711   // Atomic operations are SEQ_CST by default
1712   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
1713   LIRAccess access(this, decorators, base, offset, type);
1714   if (access.is_raw()) {
1715     return _barrier_set->BarrierSetC1::atomic_cmpxchg_at(access, cmp_value, new_value);
1716   } else {
1717     return _barrier_set->atomic_cmpxchg_at(access, cmp_value, new_value);
1718   }
1719 }

1730   } else {
1731     return _barrier_set->atomic_xchg_at(access, value);
1732   }
1733 }
1734 
1735 LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type,
1736                                            LIRItem& base, LIRItem& offset, LIRItem& value) {
1737   decorators |= ACCESS_READ;
1738   decorators |= ACCESS_WRITE;
1739   // Atomic operations are SEQ_CST by default
1740   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
1741   LIRAccess access(this, decorators, base, offset, type);
1742   if (access.is_raw()) {
1743     return _barrier_set->BarrierSetC1::atomic_add_at(access, value);
1744   } else {
1745     return _barrier_set->atomic_add_at(access, value);
1746   }
1747 }
1748 
1749 void LIRGenerator::do_LoadField(LoadField* x) {
1750   bool needs_patching = x->needs_patching();
1751   bool is_volatile = x->field()->is_volatile();
1752   BasicType field_type = x->field_type();
1753 
1754   CodeEmitInfo* info = nullptr;
1755   if (needs_patching) {
1756     assert(x->explicit_null_check() == nullptr, "can't fold null check into patching field access");
1757     info = state_for(x, x->state_before());
1758   } else if (x->needs_null_check()) {
1759     NullCheck* nc = x->explicit_null_check();
1760     if (nc == nullptr) {
1761       info = state_for(x);
1762     } else {
1763       info = state_for(nc);
1764     }
1765   }
1766 
1767   LIRItem object(x->obj(), this);
1768 
1769   object.load_item();
1770 
1771 #ifndef PRODUCT

1782        stress_deopt)) {
1783     LIR_Opr obj = object.result();
1784     if (stress_deopt) {
1785       obj = new_register(T_OBJECT);
1786       __ move(LIR_OprFact::oopConst(nullptr), obj);
1787     }
1788     // Emit an explicit null check because the offset is too large.
1789     // If the class is not loaded and the object is null, we need to deoptimize to throw a
1790     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1791     __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1792   }
1793 
1794   DecoratorSet decorators = IN_HEAP;
1795   if (is_volatile) {
1796     decorators |= MO_SEQ_CST;
1797   }
1798   if (needs_patching) {
1799     decorators |= C1_NEEDS_PATCHING;
1800   }
1801 
1802   LIR_Opr result = rlock_result(x, field_type);
1803   access_load_at(decorators, field_type,
1804                  object, LIR_OprFact::intConst(x->offset()), result,
1805                  info ? new CodeEmitInfo(info) : nullptr, info);
1806 }
1807 
1808 // int/long jdk.internal.util.Preconditions.checkIndex
1809 void LIRGenerator::do_PreconditionsCheckIndex(Intrinsic* x, BasicType type) {
1810   assert(x->number_of_arguments() == 3, "wrong type");
1811   LIRItem index(x->argument_at(0), this);
1812   LIRItem length(x->argument_at(1), this);
1813   LIRItem oobef(x->argument_at(2), this);
1814 
1815   index.load_item();
1816   length.load_item();
1817   oobef.load_item();
1818 
1819   LIR_Opr result = rlock_result(x);
 1820   // x->state() is created from copy_state_for_exception; it does not contain arguments,
 1821   // so we must prepare them before entering the interpreter in case of deoptimization.

1930       __ move(LIR_OprFact::oopConst(nullptr), obj);
1931       __ null_check(obj, new CodeEmitInfo(null_check_info));
1932     }
1933   }
1934 
1935   if (needs_range_check) {
1936     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1937       __ branch(lir_cond_always, new RangeCheckStub(range_check_info, index.result(), array.result()));
1938     } else if (use_length) {
1939       // TODO: use a (modified) version of array_range_check that does not require a
1940       //       constant length to be loaded to a register
1941       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1942       __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
1943     } else {
1944       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1945       // The range check performs the null check, so clear it out for the load
1946       null_check_info = nullptr;
1947     }
1948   }
1949 
1950   DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1951 
1952   LIR_Opr result = rlock_result(x, x->elt_type());
1953   access_load_at(decorators, x->elt_type(),
1954                  array, index.result(), result,
1955                  nullptr, null_check_info);
1956 }
1957 
1958 
1959 void LIRGenerator::do_NullCheck(NullCheck* x) {
1960   if (x->can_trap()) {
1961     LIRItem value(x->obj(), this);
1962     value.load_item();
1963     CodeEmitInfo* info = state_for(x);
1964     __ null_check(value.result(), info);
1965   }
1966 }
1967 
1968 
1969 void LIRGenerator::do_TypeCast(TypeCast* x) {
1970   LIRItem value(x->obj(), this);
1971   value.load_item();
1972   // the result is the same as from the node we are casting
1973   set_result(x, value.result());
1974 }
1975 

2418   Compilation* comp = Compilation::current();
2419   if (do_update) {
2420     // try to find exact type, using CHA if possible, so that loading
2421     // the klass from the object can be avoided
2422     ciType* type = obj->exact_type();
2423     if (type == nullptr) {
2424       type = obj->declared_type();
2425       type = comp->cha_exact_type(type);
2426     }
2427     assert(type == nullptr || type->is_klass(), "type should be class");
2428     exact_klass = (type != nullptr && type->is_loaded()) ? (ciKlass*)type : nullptr;
2429 
2430     do_update = exact_klass == nullptr || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2431   }
2432 
2433   if (!do_null && !do_update) {
2434     return result;
2435   }
2436 
2437   ciKlass* exact_signature_k = nullptr;
2438   if (do_update) {
2439     // Is the type from the signature exact (the only one possible)?
2440     exact_signature_k = signature_at_call_k->exact_klass();
2441     if (exact_signature_k == nullptr) {
2442       exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2443     } else {
2444       result = exact_signature_k;
2445       // Known statically. No need to emit any code: prevent
2446       // LIR_Assembler::emit_profile_type() from emitting useless code
2447       profiled_k = ciTypeEntries::with_status(result, profiled_k);
2448     }
 2449     // exact_klass and exact_signature_k can both be non-null but
 2450     // different if exact_klass is loaded after the ciObject for
 2451     // exact_signature_k is created.
2452     if (exact_klass == nullptr && exact_signature_k != nullptr && exact_klass != exact_signature_k) {
2453       // sometimes the type of the signature is better than the best type
2454       // the compiler has
2455       exact_klass = exact_signature_k;
2456     }
2457     if (callee_signature_k != nullptr &&
2458         callee_signature_k != signature_at_call_k) {

2503         assert(!src->is_illegal(), "check");
2504         BasicType t = src->type();
2505         if (is_reference_type(t)) {
2506           intptr_t profiled_k = parameters->type(j);
2507           Local* local = x->state()->local_at(java_index)->as_Local();
2508           ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
2509                                         in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
2510                                         profiled_k, local, mdp, false, local->declared_type()->as_klass(), nullptr);
2511           // If the profile is known statically set it once for all and do not emit any code
2512           if (exact != nullptr) {
2513             md->set_parameter_type(j, exact);
2514           }
2515           j++;
2516         }
2517         java_index += type2size[t];
2518       }
2519     }
2520   }
2521 }
2522 
2523 void LIRGenerator::do_Base(Base* x) {
2524   __ std_entry(LIR_OprFact::illegalOpr);
2525   // Emit moves from physical registers / stack slots to virtual registers
2526   CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2527   IRScope* irScope = compilation()->hir()->top_scope();
2528   int java_index = 0;
2529   for (int i = 0; i < args->length(); i++) {
2530     LIR_Opr src = args->at(i);
2531     assert(!src->is_illegal(), "check");
2532     BasicType t = src->type();
2533 
 2534     // Types which are smaller than int are passed as int, so
 2535     // correct the type that is passed.
2536     switch (t) {
2537     case T_BYTE:
2538     case T_BOOLEAN:
2539     case T_SHORT:
2540     case T_CHAR:
2541       t = T_INT;
2542       break;

2585     }
2586     assert(obj->is_valid(), "must be valid");
2587 
2588     if (method()->is_synchronized() && GenerateSynchronizationCode) {
2589       LIR_Opr lock = syncLockOpr();
2590       __ load_stack_address_monitor(0, lock);
2591 
2592       CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, x->check_flag(Instruction::DeoptimizeOnException));
2593       CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2594 
2595       // receiver is guaranteed non-null so don't need CodeEmitInfo
2596       __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, nullptr);
2597     }
2598   }
2599   // increment invocation counters if needed
2600   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2601     profile_parameters(x);
2602     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, false);
2603     increment_invocation_counter(info);
2604   }
2605 
2606   // all blocks with a successor must end with an unconditional jump
2607   // to the successor even if they are consecutive
2608   __ jump(x->default_sux());
2609 }
2610 
2611 
2612 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2613   // construct our frame and model the production of incoming pointer
2614   // to the OSR buffer.
2615   __ osr_entry(LIR_Assembler::osrBufferPointer());
2616   LIR_Opr result = rlock_result(x);
2617   __ move(LIR_Assembler::osrBufferPointer(), result);
2618 }
2619 
2620 
2621 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2622   assert(args->length() == arg_list->length(),
2623          "args=%d, arg_list=%d", args->length(), arg_list->length());
2624   for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
2625     LIRItem* param = args->at(i);
2626     LIR_Opr loc = arg_list->at(i);
2627     if (loc->is_register()) {
2628       param->load_item_force(loc);
2629     } else {
2630       LIR_Address* addr = loc->as_address_ptr();
2631       param->load_for_store(addr->type());
2632       if (addr->type() == T_OBJECT) {
2633         __ move_wide(param->result(), addr);
2634       } else
2635         __ move(param->result(), addr);
2636     }
2637   }
2638 
2639   if (x->has_receiver()) {
2640     LIRItem* receiver = args->at(0);
2641     LIR_Opr loc = arg_list->at(0);
2642     if (loc->is_register()) {
2643       receiver->load_item_force(loc);
2644     } else {
2645       assert(loc->is_address(), "just checking");
2646       receiver->load_for_store(T_OBJECT);
2647       __ move_wide(receiver->result(), loc->as_address_ptr());
2648     }
2649   }
2650 }
2651 
2652 
2653 // Visits all arguments, returns appropriate items without loading them
2654 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2655   LIRItemList* argument_items = new LIRItemList();
2656   if (x->has_receiver()) {

2782   __ move(tmp, reg);
2783 }
2784 
2785 
2786 
2787 // Code for  :  x->x() {x->cond()} x->y() ? x->tval() : x->fval()
2788 void LIRGenerator::do_IfOp(IfOp* x) {
2789 #ifdef ASSERT
2790   {
2791     ValueTag xtag = x->x()->type()->tag();
2792     ValueTag ttag = x->tval()->type()->tag();
2793     assert(xtag == intTag || xtag == objectTag, "cannot handle others");
2794     assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
2795     assert(ttag == x->fval()->type()->tag(), "cannot handle others");
2796   }
2797 #endif
2798 
2799   LIRItem left(x->x(), this);
2800   LIRItem right(x->y(), this);
2801   left.load_item();
2802   if (can_inline_as_constant(right.value())) {
2803     right.dont_load_item();
2804   } else {
2805     right.load_item();
2806   }
2807 
2808   LIRItem t_val(x->tval(), this);
2809   LIRItem f_val(x->fval(), this);
2810   t_val.dont_load_item();
2811   f_val.dont_load_item();
2812   LIR_Opr reg = rlock_result(x);
2813 
2814   __ cmp(lir_cond(x->cond()), left.result(), right.result());
2815   __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
2816 }
2817 
2818 void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {
2819   assert(x->number_of_arguments() == 0, "wrong type");
2820   // Enforce computation of _reserved_argument_area_size which is required on some platforms.
2821   BasicTypeList signature;
2822   CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2823   LIR_Opr reg = result_register_for(x->type());
2824   __ call_runtime_leaf(routine, getThreadTemp(),
2825                        reg, new LIR_OprList());
2826   LIR_Opr result = rlock_result(x);
2827   __ move(reg, result);
2828 }
2829 
2830 
2831 
2832 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
2833   switch (x->id()) {
2834   case vmIntrinsics::_intBitsToFloat      :
2835   case vmIntrinsics::_doubleToRawLongBits :

3068   if (x->recv() != nullptr || x->nb_profiled_args() > 0) {
3069     profile_parameters_at_call(x);
3070   }
3071 
3072   if (x->recv() != nullptr) {
3073     LIRItem value(x->recv(), this);
3074     value.load_item();
3075     recv = new_register(T_OBJECT);
3076     __ move(value.result(), recv);
3077   }
3078   __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3079 }
3080 
3081 void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
3082   int bci = x->bci_of_invoke();
3083   ciMethodData* md = x->method()->method_data_or_null();
3084   assert(md != nullptr, "Sanity");
3085   ciProfileData* data = md->bci_to_data(bci);
3086   if (data != nullptr) {
3087     assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
3088     ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
3089     LIR_Opr mdp = LIR_OprFact::illegalOpr;
3090 
3091     bool ignored_will_link;
3092     ciSignature* signature_at_call = nullptr;
3093     x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3094 
3095     // The offset within the MDO of the entry to update may be too large
3096     // to be used in load/store instructions on some platforms. So have
3097     // profile_type() compute the address of the profile in a register.
3098     ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
3099         ret->type(), x->ret(), mdp,
3100         !x->needs_null_check(),
3101         signature_at_call->return_type()->as_klass(),
3102         x->callee()->signature()->return_type()->as_klass());
3103     if (exact != nullptr) {
3104       md->set_return_type(bci, exact);
3105     }
3106   }
3107 }
3108 
3109 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
 3110   // We can safely ignore accessors here, since c2 will inline them anyway;
 3111   // accessors are also always mature.
3112   if (!x->inlinee()->is_accessor()) {
3113     CodeEmitInfo* info = state_for(x, x->state(), true);
3114     // Notify the runtime very infrequently only to take care of counter overflows
3115     int freq_log = Tier23InlineeNotifyFreqLog;
3116     double scale;
3117     if (_method->has_option_value(CompileCommandEnum::CompileThresholdScaling, scale)) {
3118       freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
3119     }
3120     increment_event_counter_impl(info, x->inlinee(), LIR_OprFact::intConst(InvocationCounter::count_increment), right_n_bits(freq_log), InvocationEntryBci, false, true);
3121   }
3122 }
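 // With a step of InvocationCounter::count_increment and a frequency mask of
 // right_n_bits(freq_log), the runtime notification should fire roughly once
 // every 2^freq_log invocations of the inlinee.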
3123 
3124 void LIRGenerator::increment_backedge_counter_conditionally(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info, int left_bci, int right_bci, int bci) {
3125   if (compilation()->is_profiling()) {
3126 #if defined(X86) && !defined(_LP64)
3127     // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
3128     LIR_Opr left_copy = new_register(left->type());

New version of src/hotspot/share/c1/c1_LIRGenerator.cpp:

  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "c1/c1_Compilation.hpp"
  26 #include "c1/c1_Defs.hpp"
  27 #include "c1/c1_FrameMap.hpp"
  28 #include "c1/c1_Instruction.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_LIRGenerator.hpp"
  31 #include "c1/c1_ValueStack.hpp"
  32 #include "ci/ciArrayKlass.hpp"
  33 #include "ci/ciFlatArrayKlass.hpp"
  34 #include "ci/ciInlineKlass.hpp"
  35 #include "ci/ciInstance.hpp"
  36 #include "ci/ciObjArray.hpp"
  37 #include "ci/ciUtilities.hpp"
  38 #include "compiler/compilerDefinitions.inline.hpp"
  39 #include "compiler/compilerOracle.hpp"
  40 #include "gc/shared/barrierSet.hpp"
  41 #include "gc/shared/c1/barrierSetC1.hpp"
  42 #include "oops/klass.inline.hpp"
  43 #include "oops/methodCounters.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "runtime/stubRoutines.hpp"
  46 #include "runtime/vm_version.hpp"
  47 #include "utilities/bitMap.inline.hpp"
  48 #include "utilities/macros.hpp"
  49 #include "utilities/powerOfTwo.hpp"
  50 
  51 #ifdef ASSERT
  52 #define __ gen()->lir(__FILE__, __LINE__)->
  53 #else
  54 #define __ gen()->lir()->

 200 }
 201 
 202 
 203 //--------------------------------------------------------------
 204 // LIRItem
 205 
 206 void LIRItem::set_result(LIR_Opr opr) {
 207   assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
 208   value()->set_operand(opr);
 209 
 210 #ifdef ASSERT
 211   if (opr->is_virtual()) {
 212     _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), nullptr);
 213   }
 214 #endif
 215 
 216   _result = opr;
 217 }
 218 
 219 void LIRItem::load_item() {
 220   assert(!_gen->in_conditional_code(), "LIRItem cannot be loaded in conditional code");
 221 
 222   if (result()->is_illegal()) {
 223     // update the items result
 224     _result = value()->operand();
 225   }
 226   if (!result()->is_register()) {
 227     LIR_Opr reg = _gen->new_register(value()->type());
 228     __ move(result(), reg);
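    // For a constant, only this item's result is redirected to the register;
    // the instruction keeps its constant operand so other uses can still fold it.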
 229     if (result()->is_constant()) {
 230       _result = reg;
 231     } else {
 232       set_result(reg);
 233     }
 234   }
 235 }
 236 
 237 
 238 void LIRItem::load_for_store(BasicType type) {
 239   if (_gen->can_store_as_constant(value(), type)) {
 240     _result = value()->operand();
 241     if (!_result->is_constant()) {

 609     assert(right_op != result_op, "malformed");
 610     __ move(left_op, result_op);
 611     left_op = result_op;
 612   }
 613 
 614   switch(code) {
 615     case Bytecodes::_iand:
 616     case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;
 617 
 618     case Bytecodes::_ior:
 619     case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;
 620 
 621     case Bytecodes::_ixor:
 622     case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;
 623 
 624     default: ShouldNotReachHere();
 625   }
 626 }
 627 
 628 
 629 void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no,
 630                                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info, CodeStub* throw_ie_stub) {
 631   if (!GenerateSynchronizationCode) return;
 632   // for slow path, use debug info for state after successful locking
 633   CodeStub* slow_path = new MonitorEnterStub(object, lock, info, throw_ie_stub, scratch);
 634   __ load_stack_address_monitor(monitor_no, lock);
 635   // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
 636   __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception, throw_ie_stub);
 637 }
 638 
 639 
 640 void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
 641   if (!GenerateSynchronizationCode) return;
 642   // setup registers
 643   LIR_Opr hdr = lock;
 644   lock = new_hdr;
 645   CodeStub* slow_path = new MonitorExitStub(lock, LockingMode != LM_MONITOR, monitor_no);
 646   __ load_stack_address_monitor(monitor_no, lock);
 647   __ unlock_object(hdr, object, lock, scratch, slow_path);
 648 }
 649 
 650 #ifndef PRODUCT
 651 void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
 652   if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
 653     tty->print_cr("   ###class not loaded at new bci %d", new_instance->printable_bci());
 654   } else if (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_jvmci() && new_instance->is_unresolved())) {
 655     tty->print_cr("   ###class not resolved at new bci %d", new_instance->printable_bci());
 656   }
 657 }
 658 #endif
 659 
 660 void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, bool allow_inline, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
 661   if (allow_inline) {
 662     assert(!is_unresolved && klass->is_loaded(), "inline type klass should be resolved");
 663     __ metadata2reg(klass->constant_encoding(), klass_reg);
 664   } else {
 665     klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
 666   }
 667   // If klass is not loaded we do not know if the klass has finalizers or is an unexpected inline klass
 668   if (UseFastNewInstance && klass->is_loaded() && (allow_inline || !klass->is_inlinetype())
 669       && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
 670 
 671     C1StubId stub_id = klass->is_initialized() ? C1StubId::fast_new_instance_id : C1StubId::fast_new_instance_init_check_id;
 672 
 673     CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);
 674 
 675     assert(klass->is_loaded(), "must be loaded");
 676     // allocate space for instance
 677     assert(klass->size_helper() > 0, "illegal instance size");
 678     const int instance_size = align_object_size(klass->size_helper());
 679     __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
 680                        oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
 681   } else {
 682     CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, C1StubId::new_instance_id);
 683     __ jump(slow_path);
 684     __ branch_destination(slow_path->continuation());
 685   }
 686 }
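 // In the fast path above, allocate_object emits the allocation inline
 // (typically a TLAB bump-the-pointer sequence) with NewInstanceStub as the
 // slow path; otherwise allocation is delegated entirely to the runtime stub.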
 687 
 688 
 689 static bool is_constant_zero(Instruction* inst) {
 690   IntConstant* c = inst->type()->as_IntConstant();
 691   if (c) {
 692     return (c->value() == 0);
 693   }
 694   return false;
 695 }
 696 
 697 
 698 static bool positive_constant(Instruction* inst) {
 699   IntConstant* c = inst->type()->as_IntConstant();
 700   if (c) {
 701     return (c->value() >= 0);
 702   }
 703   return false;

 763       if (src_type != nullptr) {
 764         if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
 765           is_exact = true;
 766           expected_type = dst_type;
 767         }
 768       }
 769     }
 770     // at least pass along a good guess
 771     if (expected_type == nullptr) expected_type = dst_exact_type;
 772     if (expected_type == nullptr) expected_type = src_declared_type;
 773     if (expected_type == nullptr) expected_type = dst_declared_type;
 774 
 775     src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
 776     dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
 777   }
 778 
 779   // if a probable array type has been identified, figure out if any
 780   // of the required checks for a fast case can be elided.
 781   int flags = LIR_OpArrayCopy::all_flags;
 782 
 783   if (!src->is_loaded_flat_array() && !dst->is_loaded_flat_array()) {
 784     flags &= ~LIR_OpArrayCopy::always_slow_path;
 785   }
 786   if (!src->maybe_flat_array()) {
 787     flags &= ~LIR_OpArrayCopy::src_inlinetype_check;
 788   }
 789   if (!dst->maybe_flat_array() && !dst->maybe_null_free_array()) {
 790     flags &= ~LIR_OpArrayCopy::dst_inlinetype_check;
 791   }
 792 
 793   if (!src_objarray)
 794     flags &= ~LIR_OpArrayCopy::src_objarray;
 795   if (!dst_objarray)
 796     flags &= ~LIR_OpArrayCopy::dst_objarray;
 797 
 798   if (!x->arg_needs_null_check(0))
 799     flags &= ~LIR_OpArrayCopy::src_null_check;
 800   if (!x->arg_needs_null_check(2))
 801     flags &= ~LIR_OpArrayCopy::dst_null_check;
 802 
 803 
 804   if (expected_type != nullptr) {
 805     Value length_limit = nullptr;
 806 
 807     IfOp* ifop = length->as_IfOp();
 808     if (ifop != nullptr) {
 809       // look for expressions like min(v, a.length), which end up as
 810       //   x > y ? y : x  or  x >= y ? y : x
 811       if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
 812           ifop->x() == ifop->fval() &&

1465   }
1466   return _vreg_flags.at(vreg_num, f);
1467 }
1468 
1469 
1470 // Block local constant handling.  This code is useful for keeping
1471 // unpinned constants and constants which aren't exposed in the IR in
1472 // registers.  Unpinned Constant instructions have their operands
1473 // cleared when the block is finished so that other blocks can't end
1474 // up referring to their registers.
1475 
1476 LIR_Opr LIRGenerator::load_constant(Constant* x) {
1477   assert(!x->is_pinned(), "only for unpinned constants");
1478   _unpinned_constants.append(x);
1479   return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
1480 }
1481 
1482 
1483 LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
1484   BasicType t = c->type();
1485   for (int i = 0; i < _constants.length() && !in_conditional_code(); i++) {
1486     LIR_Const* other = _constants.at(i);
1487     if (t == other->type()) {
1488       switch (t) {
1489       case T_INT:
1490       case T_FLOAT:
1491         if (c->as_jint_bits() != other->as_jint_bits()) continue;
1492         break;
1493       case T_LONG:
1494       case T_DOUBLE:
1495         if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
1496         if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
1497         break;
1498       case T_OBJECT:
1499         if (c->as_jobject() != other->as_jobject()) continue;
1500         break;
1501       default:
1502         break;
1503       }
1504       return _reg_for_constants.at(i);
1505     }
1506   }
1507 
1508   LIR_Opr result = new_register(t);
1509   __ move((LIR_Opr)c, result);
1510   if (!in_conditional_code()) {
1511     _constants.append(c);
1512     _reg_for_constants.append(result);
1513   }
1514   return result;
1515 }
1516 
1517 void LIRGenerator::set_in_conditional_code(bool v) {
1518   assert(v != _in_conditional_code, "must change state");
1519   _in_conditional_code = v;
1520 }
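 // While in conditional code, load_constant() above neither reuses nor records
 // cached constant registers: a register written on a conditionally executed
 // path may not hold the constant on every path that follows.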
1521 
1522 
1523 //------------------------field access--------------------------------------
1524 
1525 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
1526   assert(x->number_of_arguments() == 4, "wrong type");
1527   LIRItem obj   (x->argument_at(0), this);  // object
1528   LIRItem offset(x->argument_at(1), this);  // offset of field
1529   LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
1530   LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
1531   assert(obj.type()->tag() == objectTag, "invalid type");
1532   assert(cmp.type()->tag() == type->tag(), "invalid type");
1533   assert(val.type()->tag() == type->tag(), "invalid type");
1534 
1535   LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
1536                                             obj, offset, cmp, val);
1537   set_result(x, result);
1538 }
1539 
 1540 // Returns an int/long value with the null marker bit set
1541 static LIR_Opr null_marker_mask(BasicType bt, ciField* field) {
1542   assert(field->null_marker_offset() != -1, "field does not have null marker");
1543   int nm_offset = field->null_marker_offset() - field->offset_in_bytes();
1544   jlong null_marker = 1ULL << (nm_offset << LogBitsPerByte);
1545   return (bt == T_LONG) ? LIR_OprFact::longConst(null_marker) : LIR_OprFact::intConst(null_marker);
1546 }
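 // Example: for a nullable flat field whose null marker is one byte past the
 // start of the payload (nm_offset == 1), the mask is 1 << 8 == 0x100.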
1547 
 1548 // Comment copied from templateTable_i486.cpp
1549 // ----------------------------------------------------------------------------
1550 // Volatile variables demand their effects be made known to all CPU's in
1551 // order.  Store buffers on most chips allow reads & writes to reorder; the
1552 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1553 // memory barrier (i.e., it's not sufficient that the interpreter does not
1554 // reorder volatile references, the hardware also must not reorder them).
1555 //
1556 // According to the new Java Memory Model (JMM):
 1557 // (1) All volatiles are serialized with respect to each other.
1558 // ALSO reads & writes act as acquire & release, so:
1559 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1560 // the read float up to before the read.  It's OK for non-volatile memory refs
1561 // that happen before the volatile read to float down below it.
 1562 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
1563 // that happen BEFORE the write float down to after the write.  It's OK for
1564 // non-volatile memory refs that happen after the volatile write to float up
1565 // before it.
1566 //
1567 // We only put in barriers around volatile refs (they are expensive), not
1568 // _between_ memory refs (that would require us to track the flavor of the
1569 // previous memory refs).  Requirements (2) and (3) require some barriers
1570 // before volatile stores and after volatile loads.  These nearly cover
1571 // requirement (1) but miss the volatile-store-volatile-load case.  This final
1572 // case is placed after volatile-stores although it could just as well go
1573 // before volatile-loads.
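 // (In C1 this policy rides on the MO_SEQ_CST decorator that is set for
 // volatile accesses below; the barrier set implementation is then responsible
 // for emitting the required memory barriers around the load or store.)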
1574 
1575 
1576 void LIRGenerator::do_StoreField(StoreField* x) {
1577   ciField* field = x->field();
1578   bool needs_patching = x->needs_patching();
1579   bool is_volatile = field->is_volatile();
1580   BasicType field_type = x->field_type();
1581 
1582   CodeEmitInfo* info = nullptr;
1583   if (needs_patching) {
1584     assert(x->explicit_null_check() == nullptr, "can't fold null check into patching field access");
1585     info = state_for(x, x->state_before());
1586   } else if (x->needs_null_check()) {
1587     NullCheck* nc = x->explicit_null_check();
1588     if (nc == nullptr) {
1589       info = state_for(x);
1590     } else {
1591       info = state_for(nc);
1592     }
1593   }
1594 
1595   LIRItem object(x->obj(), this);
1596   LIRItem value(x->value(),  this);
1597 
1598   object.load_item();
1599 
1600   if (field->is_flat()) {
1601     value.load_item();
1602   } else {
1603     if (is_volatile || needs_patching) {
1604       // load item if field is volatile (fewer special cases for volatiles)
1605       // load item if field not initialized
1606       // load item if field not constant
1607       // because of code patching we cannot inline constants
1608       if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1609         value.load_byte_item();
1610       } else  {
1611         value.load_item();
1612       }
1613     } else {
1614       value.load_for_store(field_type);
1615     }
1616   }
1617 
1618   set_no_result(x);
1619 
1620 #ifndef PRODUCT
1621   if (PrintNotLoaded && needs_patching) {
1622     tty->print_cr("   ###class not loaded at store_%s bci %d",
1623                   x->is_static() ?  "static" : "field", x->printable_bci());
1624   }
1625 #endif
1626 
1627   if (x->needs_null_check() &&
1628       (needs_patching ||
1629        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1630     // Emit an explicit null check because the offset is too large.
1631     // If the class is not loaded and the object is null, we need to deoptimize to throw a
1632     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1633     __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1634   }
1635 
1636   DecoratorSet decorators = IN_HEAP;
1637   if (is_volatile) {
1638     decorators |= MO_SEQ_CST;
1639   }
1640   if (needs_patching) {
1641     decorators |= C1_NEEDS_PATCHING;
1642   }
1643 
1644   if (field->is_flat()) {
1645     ciInlineKlass* vk = field->type()->as_inline_klass();
1646 
1647 #ifdef ASSERT
1648     bool is_naturally_atomic = vk->nof_declared_nonstatic_fields() <= 1;
1649     bool needs_atomic_access = !field->is_null_free() || (field->is_volatile() && !is_naturally_atomic);
1650     assert(needs_atomic_access, "No atomic access required");
 1651     // ZGC does not support compressed oops, so only one oop can be in the payload, which is written by a "normal" oop store.
1652     assert(!vk->contains_oops() || !UseZGC, "ZGC does not support embedded oops in flat fields");
1653 #endif
1654 
1655     // Zero the payload
1656     BasicType bt = vk->atomic_size_to_basic_type(field->is_null_free());
1657     LIR_Opr payload = new_register((bt == T_LONG) ? bt : T_INT);
1658     LIR_Opr zero = (bt == T_LONG) ? LIR_OprFact::longConst(0) : LIR_OprFact::intConst(0);
1659     __ move(zero, payload);
1660 
1661     bool is_constant_null = value.is_constant() && value.value()->is_null_obj();
1662     if (!is_constant_null) {
1663       LabelObj* L_isNull = new LabelObj();
1664       bool needs_null_check = !value.is_constant() || value.value()->is_null_obj();
1665       if (needs_null_check) {
1666         __ cmp(lir_cond_equal, value.result(), LIR_OprFact::oopConst(nullptr));
1667         __ branch(lir_cond_equal, L_isNull->label());
1668       }
1669       // Load payload (if not empty) and set null marker (if not null-free)
1670       if (!vk->is_empty()) {
1671         access_load_at(decorators, bt, value, LIR_OprFact::intConst(vk->payload_offset()), payload);
1672       }
1673       if (!field->is_null_free()) {
1674         __ logical_or(payload, null_marker_mask(bt, field), payload);
1675       }
1676       if (needs_null_check) {
1677         __ branch_destination(L_isNull->label());
1678       }
1679     }
1680     access_store_at(decorators, bt, object, LIR_OprFact::intConst(x->offset()), payload,
1681                     // Make sure to emit an implicit null check and pass the information
1682                     // that this is a flat store that might require gc barriers for oop fields.
1683                     info != nullptr ? new CodeEmitInfo(info) : nullptr, info, vk);
1684     return;
1685   }
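  // To summarize the flat-field store above: start from an all-zero payload
  // (the encoding of null when a null marker is present), conditionally copy
  // the value's payload and OR in the null marker, then publish the whole
  // payload with a single store.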
1686 
1687   access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
1688                   value.result(), info != nullptr ? new CodeEmitInfo(info) : nullptr, info);
1689 }
1690 
1691 // FIXME -- I can't find any other way to pass an address to access_load_at().
1692 class TempResolvedAddress: public Instruction {
1693  public:
1694   TempResolvedAddress(ValueType* type, LIR_Opr addr) : Instruction(type) {
1695     set_operand(addr);
1696   }
1697   virtual void input_values_do(ValueVisitor*) {}
1698   virtual void visit(InstructionVisitor* v)   {}
1699   virtual const char* name() const  { return "TempResolvedAddress"; }
1700 };
1701 
1702 LIR_Opr LIRGenerator::get_and_load_element_address(LIRItem& array, LIRItem& index) {
1703   ciType* array_type = array.value()->declared_type();
1704   ciFlatArrayKlass* flat_array_klass = array_type->as_flat_array_klass();
1705   assert(flat_array_klass->is_loaded(), "must be");
1706 
1707   int array_header_size = flat_array_klass->array_header_in_bytes();
1708   int shift = flat_array_klass->log2_element_size();
1709 
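       // Element address = array base + array header + (index << log2(element size)).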
1710 #ifndef _LP64
1711   LIR_Opr index_op = new_register(T_INT);
1712   // FIXME -- on 32-bit, the shift below can overflow, so we need to check
1713   // that the top (shift+1) bits of index_op are zero, or else throw an
1714   // ArrayIndexOutOfBoundsException.
1715   if (index.result()->is_constant()) {
1716     jint const_index = index.result()->as_jint();
1717     __ move(LIR_OprFact::intConst(const_index << shift), index_op);
1718   } else {
1719     __ shift_left(index.result(), shift, index_op);
1720   }
1721 #else
1722   LIR_Opr index_op = new_register(T_LONG);
1723   if (index.result()->is_constant()) {
1724     jint const_index = index.result()->as_jint();
1725     __ move(LIR_OprFact::longConst(const_index << shift), index_op);
1726   } else {
1727     __ convert(Bytecodes::_i2l, index.result(), index_op);
1728     // Need to shift manually, as LIR_Address can scale only up to 3.
1729     __ shift_left(index_op, shift, index_op);
1730   }
1731 #endif
1732 
1733   LIR_Opr elm_op = new_pointer_register();
1734   LIR_Address* elm_address = generate_address(array.result(), index_op, 0, array_header_size, T_ADDRESS);
1735   __ leal(LIR_OprFact::address(elm_address), elm_op);
1736   return elm_op;
1737 }
1738 
1739 void LIRGenerator::access_sub_element(LIRItem& array, LIRItem& index, LIR_Opr& result, ciField* field, int sub_offset) {
1740   assert(field != nullptr, "Need a subelement type specified");
1741 
1742   // Find the starting address of the source (inside the array)
1743   LIR_Opr elm_op = get_and_load_element_address(array, index);
1744 
1745   BasicType subelt_type = field->type()->basic_type();
1746   TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(subelt_type), elm_op);
1747   LIRItem elm_item(elm_resolved_addr, this);
1748 
1749   DecoratorSet decorators = IN_HEAP;
1750   access_load_at(decorators, subelt_type,
1751                      elm_item, LIR_OprFact::intConst(sub_offset), result,
1752                      nullptr, nullptr);
1753 }
1754 
1755 void LIRGenerator::access_flat_array(bool is_load, LIRItem& array, LIRItem& index, LIRItem& obj_item,
1756                                           ciField* field, int sub_offset) {
1757   assert(sub_offset == 0 || field != nullptr, "Sanity check");
1758 
1759   // Find the starting address of the source (inside the array)
1760   LIR_Opr elm_op = get_and_load_element_address(array, index);
1761 
1762   ciInlineKlass* elem_klass = nullptr;
1763   if (field != nullptr) {
1764     elem_klass = field->type()->as_inline_klass();
1765   } else {
1766     elem_klass = array.value()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass();
1767   }
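       // Copy the element field by field. Flat fields were expanded when the
       // klass was loaded, so the non-static fields of the inline klass cover
       // the entire flat representation.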
1768   for (int i = 0; i < elem_klass->nof_nonstatic_fields(); i++) {
1769     ciField* inner_field = elem_klass->nonstatic_field_at(i);
1770     assert(!inner_field->is_flat(), "flat fields must have been expanded");
1771     int obj_offset = inner_field->offset_in_bytes();
1772     int elm_offset = obj_offset - elem_klass->payload_offset() + sub_offset; // the object header is not stored in the array.
1773     BasicType field_type = inner_field->type()->basic_type();
1774 
1775     // Types which are smaller than int are still passed in an int register.
1776     BasicType reg_type = field_type;
1777     switch (reg_type) {
1778     case T_BYTE:
1779     case T_BOOLEAN:
1780     case T_SHORT:
1781     case T_CHAR:
1782       reg_type = T_INT;
1783       break;
1784     default:
1785       break;
1786     }
1787 
1788     LIR_Opr temp = new_register(reg_type);
1789     TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(field_type), elm_op);
1790     LIRItem elm_item(elm_resolved_addr, this);
1791 
1792     DecoratorSet decorators = IN_HEAP;
1793     if (is_load) {
1794       access_load_at(decorators, field_type,
1795                      elm_item, LIR_OprFact::intConst(elm_offset), temp,
1796                      nullptr, nullptr);
1797       access_store_at(decorators, field_type,
1798                       obj_item, LIR_OprFact::intConst(obj_offset), temp,
1799                       nullptr, nullptr);
1800     } else {
1801       access_load_at(decorators, field_type,
1802                      obj_item, LIR_OprFact::intConst(obj_offset), temp,
1803                      nullptr, nullptr);
1804       access_store_at(decorators, field_type,
1805                       elm_item, LIR_OprFact::intConst(elm_offset), temp,
1806                       nullptr, nullptr);
1807     }
1808   }
1809 }
1810 
1811 void LIRGenerator::check_flat_array(LIR_Opr array, LIR_Opr value, CodeStub* slow_path) {
1812   LIR_Opr tmp = new_register(T_METADATA);
1813   __ check_flat_array(array, value, tmp, slow_path);
1814 }
1815 
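     // Storing null into an array that is null-free must throw a NullPointerException,
     // so the value is null-checked only when the array turns out to be null-free at runtime.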
1816 void LIRGenerator::check_null_free_array(LIRItem& array, LIRItem& value, CodeEmitInfo* info) {
1817   LabelObj* L_end = new LabelObj();
1818   LIR_Opr tmp = new_register(T_METADATA);
1819   __ check_null_free_array(array.result(), tmp);
1820   __ branch(lir_cond_equal, L_end->label());
1821   __ null_check(value.result(), info);
1822   __ branch_destination(L_end->label());
1823 }
1824 
1825 bool LIRGenerator::needs_flat_array_store_check(StoreIndexed* x) {
1826   if (x->elt_type() == T_OBJECT && x->array()->maybe_flat_array()) {
1827     ciType* type = x->value()->declared_type();
1828     if (type != nullptr && type->is_klass()) {
1829       ciKlass* klass = type->as_klass();
1830       if (!klass->can_be_inline_klass() || (klass->is_inlinetype() && !klass->as_inline_klass()->flat_in_array())) {
1831         // This is known to be a non-flat object. If the array is a flat array,
1832         // it will be caught by the code generated by array_store_check().
1833         return false;
1834       }
1835     }
1836     // We're not 100% sure, so let's do the flat_array_store_check.
1837     return true;
1838   }
1839   return false;
1840 }
1841 
1842 bool LIRGenerator::needs_null_free_array_store_check(StoreIndexed* x) {
1843   return x->elt_type() == T_OBJECT && x->array()->maybe_null_free_array();
1844 }
1845 
1846 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1847   assert(x->is_pinned(),"");
1848   assert(x->elt_type() != T_ARRAY, "never used");
1849   bool is_loaded_flat_array = x->array()->is_loaded_flat_array();
1850   bool needs_range_check = x->compute_needs_range_check();
1851   bool use_length = x->length() != nullptr;
1852   bool obj_store = is_reference_type(x->elt_type());
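       // The store check can be skipped for stores of a null constant and for
       // exact stores into arrays statically known to be flat.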
1853   bool needs_store_check = obj_store && !(is_loaded_flat_array && x->is_exact_flat_array_store()) &&
1854                                         (x->value()->as_Constant() == nullptr ||
1855                                          !get_jobject_constant(x->value())->is_null_object());
1856 
1857   LIRItem array(x->array(), this);
1858   LIRItem index(x->index(), this);
1859   LIRItem value(x->value(), this);
1860   LIRItem length(this);
1861 
1862   array.load_item();
1863   index.load_nonconstant();
1864 
1865   if (use_length && needs_range_check) {
1866     length.set_instruction(x->length());
1867     length.load_item();

1868   }
1869 
1870   if (needs_store_check || x->check_boolean()
1871       || is_loaded_flat_array || needs_flat_array_store_check(x) || needs_null_free_array_store_check(x)) {
1872     value.load_item();
1873   } else {
1874     value.load_for_store(x->elt_type());
1875   }
1876 
1877   set_no_result(x);
1878 
1879   // the CodeEmitInfo must be duplicated for each different
1880   // LIR-instruction because spilling can occur anywhere between two
1881   // instructions and so the debug information must be different
1882   CodeEmitInfo* range_check_info = state_for(x);
1883   CodeEmitInfo* null_check_info = nullptr;
1884   if (x->needs_null_check()) {
1885     null_check_info = new CodeEmitInfo(range_check_info);
1886   }
1887 
1888   if (needs_range_check) {
1889     if (use_length) {
1890       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1891       __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
1892     } else {
1893       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1894       // range_check also does the null check
1895       null_check_info = nullptr;
1896     }
1897   }
1898 
1899   if (x->should_profile()) {
1900     if (is_loaded_flat_array) {
1901       // No need to profile a store to a flat array of known type. This can happen if
1902       // the type only became known after optimizations (for example, after the PhiSimplifier).
1903       x->set_should_profile(false);
1904     } else {
1905       int bci = x->profiled_bci();
1906       ciMethodData* md = x->profiled_method()->method_data();
1907       assert(md != nullptr, "Sanity");
1908       ciProfileData* data = md->bci_to_data(bci);
1909       assert(data != nullptr && data->is_ArrayStoreData(), "incorrect profiling entry");
1910       ciArrayStoreData* store_data = (ciArrayStoreData*)data;
1911       profile_array_type(x, md, store_data);
1912       assert(store_data->is_ArrayStoreData(), "incorrect profiling entry");
1913       if (x->array()->maybe_null_free_array()) {
1914         profile_null_free_array(array, md, store_data);
1915       }
1916     }
1917   }
1918 
1919   if (GenerateArrayStoreCheck && needs_store_check) {
1920     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1921     array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1922   }
1923 
1924   if (is_loaded_flat_array) {
1925     // TODO 8350865 This is currently dead code
1926     if (!x->value()->is_null_free()) {
1927       __ null_check(value.result(), new CodeEmitInfo(range_check_info));
1928     }
1929     // If array element is an empty inline type, no need to copy anything
1930     if (!x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass()->is_empty()) {
1931       access_flat_array(false, array, index, value);
1932     }
1933   } else {
1934     StoreFlattenedArrayStub* slow_path = nullptr;
1935 
1936     if (needs_flat_array_store_check(x)) {
1937       // Check if we indeed have a flat array
1938       index.load_item();
1939       slow_path = new StoreFlattenedArrayStub(array.result(), index.result(), value.result(), state_for(x, x->state_before()));
1940       check_flat_array(array.result(), value.result(), slow_path);
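             // check_flat_array branches to the slow-path stub if the array turns
             // out to be flat at runtime; the code below handles the non-flat case.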
1941       set_in_conditional_code(true);
1942     } else if (needs_null_free_array_store_check(x)) {
1943       CodeEmitInfo* info = new CodeEmitInfo(range_check_info);
1944       check_null_free_array(array, value, info);
1945     }
1946 
1947     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1948     if (x->check_boolean()) {
1949       decorators |= C1_MASK_BOOLEAN;
1950     }
1951 
1952     access_store_at(decorators, x->elt_type(), array, index.result(), value.result(), nullptr, null_check_info);
1953     if (slow_path != nullptr) {
1954       __ branch_destination(slow_path->continuation());
1955       set_in_conditional_code(false);
1956     }
1957   }
1958 }
1959 
1960 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
1961                                   LIRItem& base, LIR_Opr offset, LIR_Opr result,
1962                                   CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
1963   decorators |= ACCESS_READ;
1964   LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
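       // Raw accesses bind statically to the base BarrierSetC1 implementation,
       // bypassing GC-specific barrier emission.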
1965   if (access.is_raw()) {
1966     _barrier_set->BarrierSetC1::load_at(access, result);
1967   } else {
1968     _barrier_set->load_at(access, result);
1969   }
1970 }
1971 
1972 void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
1973                                LIR_Opr addr, LIR_Opr result) {
1974   decorators |= ACCESS_READ;
1975   LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
1976   access.set_resolved_addr(addr);
1977   if (access.is_raw()) {
1978     _barrier_set->BarrierSetC1::load(access, result);
1979   } else {
1980     _barrier_set->load(access, result);
1981   }
1982 }
1983 
1984 void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type,
1985                                    LIRItem& base, LIR_Opr offset, LIR_Opr value,
1986                                    CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info,
1987                                    ciInlineKlass* vk) {
1988   decorators |= ACCESS_WRITE;
1989   LIRAccess access(this, decorators, base, offset, type, patch_info, store_emit_info, vk);
1990   if (access.is_raw()) {
1991     _barrier_set->BarrierSetC1::store_at(access, value);
1992   } else {
1993     _barrier_set->store_at(access, value);
1994   }
1995 }
1996 
1997 LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
1998                                                LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
1999   decorators |= ACCESS_READ;
2000   decorators |= ACCESS_WRITE;
2001   // Atomic operations are SEQ_CST by default
2002   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
2003   LIRAccess access(this, decorators, base, offset, type);
2004   if (access.is_raw()) {
2005     return _barrier_set->BarrierSetC1::atomic_cmpxchg_at(access, cmp_value, new_value);
2006   } else {
2007     return _barrier_set->atomic_cmpxchg_at(access, cmp_value, new_value);
2008   }
2009 }

2020   } else {
2021     return _barrier_set->atomic_xchg_at(access, value);
2022   }
2023 }
2024 
2025 LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type,
2026                                            LIRItem& base, LIRItem& offset, LIRItem& value) {
2027   decorators |= ACCESS_READ;
2028   decorators |= ACCESS_WRITE;
2029   // Atomic operations are SEQ_CST by default
2030   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
2031   LIRAccess access(this, decorators, base, offset, type);
2032   if (access.is_raw()) {
2033     return _barrier_set->BarrierSetC1::atomic_add_at(access, value);
2034   } else {
2035     return _barrier_set->atomic_add_at(access, value);
2036   }
2037 }
2038 
2039 void LIRGenerator::do_LoadField(LoadField* x) {
2040   ciField* field = x->field();
2041   bool needs_patching = x->needs_patching();
2042   bool is_volatile = field->is_volatile();
2043   BasicType field_type = x->field_type();
2044 
2045   CodeEmitInfo* info = nullptr;
2046   if (needs_patching) {
2047     assert(x->explicit_null_check() == nullptr, "can't fold null check into patching field access");
2048     info = state_for(x, x->state_before());
2049   } else if (x->needs_null_check()) {
2050     NullCheck* nc = x->explicit_null_check();
2051     if (nc == nullptr) {
2052       info = state_for(x);
2053     } else {
2054       info = state_for(nc);
2055     }
2056   }
2057 
2058   LIRItem object(x->obj(), this);
2059 
2060   object.load_item();
2061 
2062 #ifndef PRODUCT

2073        stress_deopt)) {
2074     LIR_Opr obj = object.result();
2075     if (stress_deopt) {
2076       obj = new_register(T_OBJECT);
2077       __ move(LIR_OprFact::oopConst(nullptr), obj);
2078     }
2079     // Emit an explicit null check because the offset is too large.
2080     // If the class is not loaded and the object is null, we need to deoptimize to throw a
2081     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
2082     __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
2083   }
2084 
2085   DecoratorSet decorators = IN_HEAP;
2086   if (is_volatile) {
2087     decorators |= MO_SEQ_CST;
2088   }
2089   if (needs_patching) {
2090     decorators |= C1_NEEDS_PATCHING;
2091   }
2092 
2093   if (field->is_flat()) {
2094     ciInlineKlass* vk = field->type()->as_inline_klass();
2095 #ifdef ASSERT
2096     bool is_naturally_atomic = vk->nof_declared_nonstatic_fields() <= 1;
2097     bool needs_atomic_access = !field->is_null_free() || (field->is_volatile() && !is_naturally_atomic);
2098     assert(needs_atomic_access, "No atomic access required");
2099     assert(x->state_before() != nullptr, "Needs state before");
2100 #endif
2101 
2102     // Allocate buffer (we can't easily do this conditionally on the null check below
2103     // because branches added in the LIR are opaque to the register allocator).
2104     NewInstance* buffer = new NewInstance(vk, x->state_before(), false, true);
2105     do_NewInstance(buffer);
2106     LIRItem dest(buffer, this);
2107 
2108     // Copy the payload to the buffer
2109     BasicType bt = vk->atomic_size_to_basic_type(field->is_null_free());
2110     LIR_Opr payload = new_register((bt == T_LONG) ? bt : T_INT);
2111     access_load_at(decorators, bt, object, LIR_OprFact::intConst(field->offset_in_bytes()), payload,
2112                    // Make sure to emit an implicit null check
2113                    info ? new CodeEmitInfo(info) : nullptr, info);
2114     access_store_at(decorators, bt, dest, LIR_OprFact::intConst(vk->payload_offset()), payload);
2115 
2116     if (field->is_null_free()) {
2117       set_result(x, buffer->operand());
2118     } else {
2119       // Check the null marker and set result to null if it's not set
2120       __ logical_and(payload, null_marker_mask(bt, field), payload);
2121       __ cmp(lir_cond_equal, payload, (bt == T_LONG) ? LIR_OprFact::longConst(0) : LIR_OprFact::intConst(0));
2122       __ cmove(lir_cond_equal, LIR_OprFact::oopConst(nullptr), buffer->operand(), rlock_result(x), T_OBJECT);
2123     }
2124 
2125     // Ensure the copy is visible before any subsequent store that publishes the buffer.
2126     __ membar_storestore();
2127     return;
2128   }
2129 
2130   LIR_Opr result = rlock_result(x, field_type);
2131   access_load_at(decorators, field_type,
2132                  object, LIR_OprFact::intConst(x->offset()), result,
2133                  info ? new CodeEmitInfo(info) : nullptr, info);
2134 }
2135 
2136 // int/long jdk.internal.util.Preconditions.checkIndex
2137 void LIRGenerator::do_PreconditionsCheckIndex(Intrinsic* x, BasicType type) {
2138   assert(x->number_of_arguments() == 3, "wrong type");
2139   LIRItem index(x->argument_at(0), this);
2140   LIRItem length(x->argument_at(1), this);
2141   LIRItem oobef(x->argument_at(2), this);
2142 
2143   index.load_item();
2144   length.load_item();
2145   oobef.load_item();
2146 
2147   LIR_Opr result = rlock_result(x);
2148   // x->state() is created from copy_state_for_exception; it does not contain the arguments,
2149   // which we must prepare before entering interpreter mode on deoptimization.

2258       __ move(LIR_OprFact::oopConst(nullptr), obj);
2259       __ null_check(obj, new CodeEmitInfo(null_check_info));
2260     }
2261   }
2262 
2263   if (needs_range_check) {
2264     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
2265       __ branch(lir_cond_always, new RangeCheckStub(range_check_info, index.result(), array.result()));
2266     } else if (use_length) {
2267       // TODO: use a (modified) version of array_range_check that does not require a
2268       //       constant length to be loaded to a register
2269       __ cmp(lir_cond_belowEqual, length.result(), index.result());
2270       __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
2271     } else {
2272       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
2273       // The range check performs the null check, so clear it out for the load
2274       null_check_info = nullptr;
2275     }
2276   }
2277 
2278   ciMethodData* md = nullptr;
2279   ciArrayLoadData* load_data = nullptr;
2280   if (x->should_profile()) {
2281     if (x->array()->is_loaded_flat_array()) {
2282       // No need to profile a load from a flat array of known type. This can happen if
2283       // the type only became known after optimizations (for example, after the PhiSimplifier).
2284       x->set_should_profile(false);
2285     } else {
2286       int bci = x->profiled_bci();
2287       md = x->profiled_method()->method_data();
2288       assert(md != nullptr, "Sanity");
2289       ciProfileData* data = md->bci_to_data(bci);
2290       assert(data != nullptr && data->is_ArrayLoadData(), "incorrect profiling entry");
2291       load_data = (ciArrayLoadData*)data;
2292       profile_array_type(x, md, load_data);
2293     }
2294   }
2295 
2296   Value element;
2297   if (x->vt() != nullptr) {
2298     assert(x->array()->is_loaded_flat_array(), "must be");
2299     // Find the destination address (of the NewInlineTypeInstance).
2300     LIRItem obj_item(x->vt(), this);
2301 
2302     access_flat_array(true, array, index, obj_item,
2303                       x->delayed() == nullptr ? nullptr : x->delayed()->field(),
2304                       x->delayed() == nullptr ? 0 : x->delayed()->offset());
2305     set_no_result(x);
2306   } else if (x->delayed() != nullptr) {
2307     assert(x->array()->is_loaded_flat_array(), "must be");
2308     LIR_Opr result = rlock_result(x, x->delayed()->field()->type()->basic_type());
2309     access_sub_element(array, index, result, x->delayed()->field(), x->delayed()->offset());
2310   } else {
2311     LIR_Opr result = rlock_result(x, x->elt_type());
2312     LoadFlattenedArrayStub* slow_path = nullptr;
2313 
2314     if (x->should_profile() && x->array()->maybe_null_free_array()) {
2315       profile_null_free_array(array, md, load_data);
2316     }
2317 
2318     if (x->elt_type() == T_OBJECT && x->array()->maybe_flat_array()) {
2319       assert(x->delayed() == nullptr, "Delayed LoadIndexed only applies to loaded_flat_arrays");
2320       index.load_item();
2321       // if we are loading from a flat array, load it using a runtime call
2322       slow_path = new LoadFlattenedArrayStub(array.result(), index.result(), result, state_for(x, x->state_before()));
2323       check_flat_array(array.result(), LIR_OprFact::illegalOpr, slow_path);
2324       set_in_conditional_code(true);
2325     }
2326 
2327     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
2328     access_load_at(decorators, x->elt_type(),
2329                    array, index.result(), result,
2330                    nullptr, null_check_info);
2331 
2332     if (slow_path != nullptr) {
2333       __ branch_destination(slow_path->continuation());
2334       set_in_conditional_code(false);
2335     }
2336 
2337     element = x;
2338   }
2339 
2340   if (x->should_profile()) {
2341     profile_element_type(element, md, load_data);
2342   }

2343 }
2344 
2345 
2346 void LIRGenerator::do_NullCheck(NullCheck* x) {
2347   if (x->can_trap()) {
2348     LIRItem value(x->obj(), this);
2349     value.load_item();
2350     CodeEmitInfo* info = state_for(x);
2351     __ null_check(value.result(), info);
2352   }
2353 }
2354 
2355 
2356 void LIRGenerator::do_TypeCast(TypeCast* x) {
2357   LIRItem value(x->obj(), this);
2358   value.load_item();
2359   // the result is the same as from the node we are casting
2360   set_result(x, value.result());
2361 }
2362 

2805   Compilation* comp = Compilation::current();
2806   if (do_update) {
2807     // try to find exact type, using CHA if possible, so that loading
2808     // the klass from the object can be avoided
2809     ciType* type = obj->exact_type();
2810     if (type == nullptr) {
2811       type = obj->declared_type();
2812       type = comp->cha_exact_type(type);
2813     }
2814     assert(type == nullptr || type->is_klass(), "type should be class");
2815     exact_klass = (type != nullptr && type->is_loaded()) ? (ciKlass*)type : nullptr;
2816 
2817     do_update = exact_klass == nullptr || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2818   }
2819 
2820   if (!do_null && !do_update) {
2821     return result;
2822   }
2823 
2824   ciKlass* exact_signature_k = nullptr;
2825   if (do_update && signature_at_call_k != nullptr) {
2826     // Is the type from the signature exact (the only one possible)?
2827     exact_signature_k = signature_at_call_k->exact_klass();
2828     if (exact_signature_k == nullptr) {
2829       exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2830     } else {
2831       result = exact_signature_k;
2832       // Known statically. No need to emit any code: prevent
2833       // LIR_Assembler::emit_profile_type() from emitting useless code
2834       profiled_k = ciTypeEntries::with_status(result, profiled_k);
2835     }
2836     // exact_klass and exact_signature_k can both be non-null but
2837     // different if exact_klass is loaded after the ciObject for
2838     // exact_signature_k is created.
2839     if (exact_klass == nullptr && exact_signature_k != nullptr && exact_klass != exact_signature_k) {
2840       // sometimes the type of the signature is better than the best type
2841       // the compiler has
2842       exact_klass = exact_signature_k;
2843     }
2844     if (callee_signature_k != nullptr &&
2845         callee_signature_k != signature_at_call_k) {

2890         assert(!src->is_illegal(), "check");
2891         BasicType t = src->type();
2892         if (is_reference_type(t)) {
2893           intptr_t profiled_k = parameters->type(j);
2894           Local* local = x->state()->local_at(java_index)->as_Local();
2895           ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
2896                                         in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
2897                                         profiled_k, local, mdp, false, local->declared_type()->as_klass(), nullptr);
2898           // If the profile is known statically set it once for all and do not emit any code
2899           if (exact != nullptr) {
2900             md->set_parameter_type(j, exact);
2901           }
2902           j++;
2903         }
2904         java_index += type2size[t];
2905       }
2906     }
2907   }
2908 }
2909 
2910 void LIRGenerator::profile_flags(ciMethodData* md, ciProfileData* data, int flag, LIR_Condition condition) {
2911   assert(md != nullptr && data != nullptr, "should have been initialized");
2912   LIR_Opr mdp = new_register(T_METADATA);
2913   __ metadata2reg(md->constant_encoding(), mdp);
2914   LIR_Address* addr = new LIR_Address(mdp, md->byte_offset_of_slot(data, DataLayout::flags_offset()), T_BYTE);
2915   LIR_Opr flags = new_register(T_INT);
2916   __ move(addr, flags);
2917   if (condition != lir_cond_always) {
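         // update = (condition ? 0 : flag), i.e. the flag bit is only merged
         // into the profile when the condition does not hold.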
2918     LIR_Opr update = new_register(T_INT);
2919     __ cmove(condition, LIR_OprFact::intConst(0), LIR_OprFact::intConst(flag), update, T_INT);
         __ logical_or(flags, update, flags);
2920   } else {
2921     __ logical_or(flags, LIR_OprFact::intConst(flag), flags);
2922   }
2923   __ store(flags, addr);
2924 }
2925 
2926 template <class ArrayData> void LIRGenerator::profile_null_free_array(LIRItem array, ciMethodData* md, ArrayData* load_store) {
2927   assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
2929   LIR_Opr tmp = new_register(T_METADATA);
2930   __ check_null_free_array(array.result(), tmp);
2931 
2932   profile_flags(md, load_store, ArrayStoreData::null_free_array_byte_constant(), lir_cond_equal);
2933 }
2934 
2935 template <class ArrayData> void LIRGenerator::profile_array_type(AccessIndexed* x, ciMethodData*& md, ArrayData*& load_store) {
2936   assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
2937   LIR_Opr mdp = LIR_OprFact::illegalOpr;
2938   profile_type(md, md->byte_offset_of_slot(load_store, ArrayData::array_offset()), 0,
2939                load_store->array()->type(), x->array(), mdp, true, nullptr, nullptr);
2940 }
2941 
2942 void LIRGenerator::profile_element_type(Value element, ciMethodData* md, ciArrayLoadData* load_data) {
2943   assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
2944   assert(md != nullptr && load_data != nullptr, "should have been initialized");
2945   LIR_Opr mdp = LIR_OprFact::illegalOpr;
2946   profile_type(md, md->byte_offset_of_slot(load_data, ArrayLoadData::element_offset()), 0,
2947                load_data->element()->type(), element, mdp, false, nullptr, nullptr);
2948 }
2949 
2950 void LIRGenerator::do_Base(Base* x) {
2951   __ std_entry(LIR_OprFact::illegalOpr);
2952   // Emit moves from physical registers / stack slots to virtual registers
2953   CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2954   IRScope* irScope = compilation()->hir()->top_scope();
2955   int java_index = 0;
2956   for (int i = 0; i < args->length(); i++) {
2957     LIR_Opr src = args->at(i);
2958     assert(!src->is_illegal(), "check");
2959     BasicType t = src->type();
2960 
2961     // Types which are smaller than int are passed as int, so
2962     // correct the type that was passed.
2963     switch (t) {
2964     case T_BYTE:
2965     case T_BOOLEAN:
2966     case T_SHORT:
2967     case T_CHAR:
2968       t = T_INT;
2969       break;

3012     }
3013     assert(obj->is_valid(), "must be valid");
3014 
3015     if (method()->is_synchronized() && GenerateSynchronizationCode) {
3016       LIR_Opr lock = syncLockOpr();
3017       __ load_stack_address_monitor(0, lock);
3018 
3019       CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, x->check_flag(Instruction::DeoptimizeOnException));
3020       CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
3021 
3022       // receiver is guaranteed non-null so don't need CodeEmitInfo
3023       __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, nullptr);
3024     }
3025   }
3026   // increment invocation counters if needed
3027   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
3028     profile_parameters(x);
3029     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), nullptr, false);
3030     increment_invocation_counter(info);
3031   }
3032   if (method()->has_scalarized_args()) {
3033     // Check if deoptimization was triggered (i.e. orig_pc was set) while buffering scalarized inline type arguments
3034     // in the entry point (see comments in frame::deoptimize). If so, deoptimize only now that we have the right state.
3035     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), nullptr, false);
3036     CodeStub* deopt_stub = new DeoptimizeStub(info, Deoptimization::Reason_none, Deoptimization::Action_none);
3037     __ append(new LIR_Op0(lir_check_orig_pc));
3038     __ branch(lir_cond_notEqual, deopt_stub);
3039   }
3040 
3041   // all blocks with a successor must end with an unconditional jump
3042   // to the successor even if they are consecutive
3043   __ jump(x->default_sux());
3044 }
3045 
3046 
3047 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
3048   // construct our frame and model the production of incoming pointer
3049   // to the OSR buffer.
3050   __ osr_entry(LIR_Assembler::osrBufferPointer());
3051   LIR_Opr result = rlock_result(x);
3052   __ move(LIR_Assembler::osrBufferPointer(), result);
3053 }
3054 
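     // Loads one outgoing argument either directly into its calling-convention
     // register or, for stack arguments, into the corresponding stack slot.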
3055 void LIRGenerator::invoke_load_one_argument(LIRItem* param, LIR_Opr loc) {
3056   if (loc->is_register()) {
3057     param->load_item_force(loc);
3058   } else {
3059     LIR_Address* addr = loc->as_address_ptr();
3060     param->load_for_store(addr->type());
3061     if (addr->type() == T_OBJECT) {
3062       __ move_wide(param->result(), addr);
3063     } else {
3064       __ move(param->result(), addr);
3065     }
3066   }
3067 }
3068 
3069 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
3070   assert(args->length() == arg_list->length(),
3071          "args=%d, arg_list=%d", args->length(), arg_list->length());
3072   for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
3073     LIRItem* param = args->at(i);
3074     LIR_Opr loc = arg_list->at(i);
3075     invoke_load_one_argument(param, loc);









3076   }
3077 
3078   if (x->has_receiver()) {
3079     LIRItem* receiver = args->at(0);
3080     LIR_Opr loc = arg_list->at(0);
3081     if (loc->is_register()) {
3082       receiver->load_item_force(loc);
3083     } else {
3084       assert(loc->is_address(), "just checking");
3085       receiver->load_for_store(T_OBJECT);
3086       __ move_wide(receiver->result(), loc->as_address_ptr());
3087     }
3088   }
3089 }
3090 
3091 
3092 // Visits all arguments, returns appropriate items without loading them
3093 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
3094   LIRItemList* argument_items = new LIRItemList();
3095   if (x->has_receiver()) {

3221   __ move(tmp, reg);
3222 }
3223 
3224 
3225 
3226 // Code for  :  x->x() {x->cond()} x->y() ? x->tval() : x->fval()
3227 void LIRGenerator::do_IfOp(IfOp* x) {
3228 #ifdef ASSERT
3229   {
3230     ValueTag xtag = x->x()->type()->tag();
3231     ValueTag ttag = x->tval()->type()->tag();
3232     assert(xtag == intTag || xtag == objectTag, "cannot handle others");
3233     assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
3234     assert(ttag == x->fval()->type()->tag(), "cannot handle others");
3235   }
3236 #endif
3237 
3238   LIRItem left(x->x(), this);
3239   LIRItem right(x->y(), this);
3240   left.load_item();
3241   if (can_inline_as_constant(right.value()) && !x->substitutability_check()) {
3242     right.dont_load_item();
3243   } else {
3244     // substitutability_check() needs to use right as a base register.
3245     right.load_item();
3246   }
3247 
3248   LIRItem t_val(x->tval(), this);
3249   LIRItem f_val(x->fval(), this);
3250   t_val.dont_load_item();
3251   f_val.dont_load_item();

3252 
3253   if (x->substitutability_check()) {
3254     substitutability_check(x, left, right, t_val, f_val);
3255   } else {
3256     LIR_Opr reg = rlock_result(x);
3257     __ cmp(lir_cond(x->cond()), left.result(), right.result());
3258     __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
3259   }
3260 }
3261 
3262 void LIRGenerator::substitutability_check(IfOp* x, LIRItem& left, LIRItem& right, LIRItem& t_val, LIRItem& f_val) {
3263   assert(x->cond() == If::eql || x->cond() == If::neq, "must be");
3264   bool is_acmpeq = (x->cond() == If::eql);
3265   LIR_Opr equal_result     = is_acmpeq ? t_val.result() : f_val.result();
3266   LIR_Opr not_equal_result = is_acmpeq ? f_val.result() : t_val.result();
3267   LIR_Opr result = rlock_result(x);
3268   CodeEmitInfo* info = state_for(x, x->state_before());
3269 
3270   substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info);
3271 }
3272 
3273 void LIRGenerator::substitutability_check(If* x, LIRItem& left, LIRItem& right) {
3274   LIR_Opr equal_result     = LIR_OprFact::intConst(1);
3275   LIR_Opr not_equal_result = LIR_OprFact::intConst(0);
3276   LIR_Opr result = new_register(T_INT);
3277   CodeEmitInfo* info = state_for(x, x->state_before());
3278 
3279   substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info);
3280 
3281   assert(x->cond() == If::eql || x->cond() == If::neq, "must be");
3282   __ cmp(lir_cond(x->cond()), result, equal_result);
3283 }
3284 
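     // Common tail of the acmp substitutability checks: emits the inline fast
     // paths and falls back to SubstitutabilityCheckStub for the general case.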
3285 void LIRGenerator::substitutability_check_common(Value left_val, Value right_val, LIRItem& left, LIRItem& right,
3286                                                  LIR_Opr equal_result, LIR_Opr not_equal_result, LIR_Opr result,
3287                                                  CodeEmitInfo* info) {
3288   LIR_Opr tmp1 = LIR_OprFact::illegalOpr;
3289   LIR_Opr tmp2 = LIR_OprFact::illegalOpr;
3290   LIR_Opr left_klass_op = LIR_OprFact::illegalOpr;
3291   LIR_Opr right_klass_op = LIR_OprFact::illegalOpr;
3292 
3293   ciKlass* left_klass  = left_val ->as_loaded_klass_or_null();
3294   ciKlass* right_klass = right_val->as_loaded_klass_or_null();
3295 
3296   if ((left_klass == nullptr || right_klass == nullptr) || // The klass is still unloaded, or came from a Phi node.
3297       !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
3298     init_temps_for_substitutability_check(tmp1, tmp2);
3299   }
3300 
3301   if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) {
3302     // No need to load klass -- the operands are statically known to be the same inline klass.
3303   } else {
3304     BasicType t_klass = UseCompressedOops ? T_INT : T_METADATA;
3305     left_klass_op = new_register(t_klass);
3306     right_klass_op = new_register(t_klass);
3307   }
3308 
3309   CodeStub* slow_path = new SubstitutabilityCheckStub(left.result(), right.result(), info);
3310   __ substitutability_check(result, left.result(), right.result(), equal_result, not_equal_result,
3311                             tmp1, tmp2,
3312                             left_klass, right_klass, left_klass_op, right_klass_op, info, slow_path);
3313 }
3314 
3315 void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {
3316   assert(x->number_of_arguments() == 0, "wrong type");
3317   // Enforce computation of _reserved_argument_area_size which is required on some platforms.
3318   BasicTypeList signature;
3319   CallingConvention* cc = frame_map()->c_calling_convention(&signature);
3320   LIR_Opr reg = result_register_for(x->type());
3321   __ call_runtime_leaf(routine, getThreadTemp(),
3322                        reg, new LIR_OprList());
3323   LIR_Opr result = rlock_result(x);
3324   __ move(reg, result);
3325 }
3326 
3327 
3328 
3329 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
3330   switch (x->id()) {
3331   case vmIntrinsics::_intBitsToFloat      :
3332   case vmIntrinsics::_doubleToRawLongBits :

3565   if (x->recv() != nullptr || x->nb_profiled_args() > 0) {
3566     profile_parameters_at_call(x);
3567   }
3568 
3569   if (x->recv() != nullptr) {
3570     LIRItem value(x->recv(), this);
3571     value.load_item();
3572     recv = new_register(T_OBJECT);
3573     __ move(value.result(), recv);
3574   }
3575   __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3576 }
3577 
3578 void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
3579   int bci = x->bci_of_invoke();
3580   ciMethodData* md = x->method()->method_data_or_null();
3581   assert(md != nullptr, "Sanity");
3582   ciProfileData* data = md->bci_to_data(bci);
3583   if (data != nullptr) {
3584     assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
3585     ciSingleTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
3586     LIR_Opr mdp = LIR_OprFact::illegalOpr;
3587 
3588     bool ignored_will_link;
3589     ciSignature* signature_at_call = nullptr;
3590     x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3591 
3592     // The offset within the MDO of the entry to update may be too large
3593     // to be used in load/store instructions on some platforms. So have
3594     // profile_type() compute the address of the profile in a register.
3595     ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
3596         ret->type(), x->ret(), mdp,
3597         !x->needs_null_check(),
3598         signature_at_call->return_type()->as_klass(),
3599         x->callee()->signature()->return_type()->as_klass());
3600     if (exact != nullptr) {
3601       md->set_return_type(bci, exact);
3602     }
3603   }
3604 }
3605 
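     // Settles the inline-type profile flag statically when the klass is known.
     // Returns false if runtime profiling is still required, i.e. when the klass
     // is unknown or may still turn out to be an inline klass.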
3606 bool LIRGenerator::profile_inline_klass(ciMethodData* md, ciProfileData* data, Value value, int flag) {
3607   ciKlass* klass = value->as_loaded_klass_or_null();
3608   if (klass != nullptr) {
3609     if (klass->is_inlinetype()) {
3610       profile_flags(md, data, flag, lir_cond_always);
3611     } else if (klass->can_be_inline_klass()) {
3612       return false;
3613     }
3614   } else {
3615     return false;
3616   }
3617   return true;
3618 }
3619 
3620 
3621 void LIRGenerator::do_ProfileACmpTypes(ProfileACmpTypes* x) {
3622   ciMethod* method = x->method();
3623   assert(method != nullptr, "method should be set if branch is profiled");
3624   ciMethodData* md = method->method_data_or_null();
3625   assert(md != nullptr, "Sanity");
3626   ciProfileData* data = md->bci_to_data(x->bci());
3627   assert(data != nullptr, "must have profiling data");
3628   assert(data->is_ACmpData(), "need BranchData for two-way branches");
3629   ciACmpData* acmp = (ciACmpData*)data;
3630   LIR_Opr mdp = LIR_OprFact::illegalOpr;
3631   profile_type(md, md->byte_offset_of_slot(acmp, ACmpData::left_offset()), 0,
3632                acmp->left()->type(), x->left(), mdp, !x->left_maybe_null(), nullptr, nullptr);
3633   int flags_offset = md->byte_offset_of_slot(data, DataLayout::flags_offset());
3634   if (!profile_inline_klass(md, acmp, x->left(), ACmpData::left_inline_type_byte_constant())) {
3635     LIR_Opr mdp = new_register(T_METADATA);
3636     __ metadata2reg(md->constant_encoding(), mdp);
3637     LIRItem value(x->left(), this);
3638     value.load_item();
3639     __ profile_inline_type(new LIR_Address(mdp, flags_offset, T_INT), value.result(), ACmpData::left_inline_type_byte_constant(), new_register(T_INT), !x->left_maybe_null());
3640   }
3641   profile_type(md, md->byte_offset_of_slot(acmp, ACmpData::left_offset()),
3642                in_bytes(ACmpData::right_offset()) - in_bytes(ACmpData::left_offset()),
3643                acmp->right()->type(), x->right(), mdp, !x->right_maybe_null(), nullptr, nullptr);
3644   if (!profile_inline_klass(md, acmp, x->right(), ACmpData::right_inline_type_byte_constant())) {
3645     LIR_Opr mdp = new_register(T_METADATA);
3646     __ metadata2reg(md->constant_encoding(), mdp);
3647     LIRItem value(x->right(), this);
3648     value.load_item();
3649     __ profile_inline_type(new LIR_Address(mdp, flags_offset, T_INT), value.result(), ACmpData::right_inline_type_byte_constant(), new_register(T_INT), !x->right_maybe_null());
3650   }
3651 }
3652 
3653 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
3654   // We can safely ignore accessors here, since C2 will inline them anyway;
3655   // accessors are also always mature.
3656   if (!x->inlinee()->is_accessor()) {
3657     CodeEmitInfo* info = state_for(x, x->state(), true);
3658     // Notify the runtime very infrequently only to take care of counter overflows
3659     int freq_log = Tier23InlineeNotifyFreqLog;
3660     double scale;
3661     if (_method->has_option_value(CompileCommandEnum::CompileThresholdScaling, scale)) {
3662       freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
3663     }
3664     increment_event_counter_impl(info, x->inlinee(), LIR_OprFact::intConst(InvocationCounter::count_increment), right_n_bits(freq_log), InvocationEntryBci, false, true);
3665   }
3666 }
3667 
3668 void LIRGenerator::increment_backedge_counter_conditionally(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info, int left_bci, int right_bci, int bci) {
3669   if (compilation()->is_profiling()) {
3670 #if defined(X86) && !defined(_LP64)
3671     // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
3672     LIR_Opr left_copy = new_register(left->type());
< prev index next >