
src/hotspot/share/c1/c1_LIRGenerator.cpp (old)


  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Compilation.hpp"
  27 #include "c1/c1_Defs.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "ci/ciObjArray.hpp"
  36 #include "ci/ciUtilities.hpp"
  37 #include "compiler/compilerDefinitions.inline.hpp"
  38 #include "gc/shared/barrierSet.hpp"
  39 #include "gc/shared/c1/barrierSetC1.hpp"
  40 #include "oops/klass.inline.hpp"
  41 #include "runtime/sharedRuntime.hpp"
  42 #include "runtime/stubRoutines.hpp"
  43 #include "runtime/vm_version.hpp"
  44 #include "utilities/bitMap.inline.hpp"
  45 #include "utilities/macros.hpp"
  46 #include "utilities/powerOfTwo.hpp"
  47 
  48 #ifdef ASSERT
  49 #define __ gen()->lir(__FILE__, __LINE__)->
  50 #else
  51 #define __ gen()->lir()->
  52 #endif
  53 

 194   ResolveNode* source = source_node(src);
 195   source->append(destination_node(dest));
 196 }
 197 
 198 
 199 //--------------------------------------------------------------
 200 // LIRItem
 201 
 202 void LIRItem::set_result(LIR_Opr opr) {
 203   assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
 204   value()->set_operand(opr);
 205 
 206   if (opr->is_virtual()) {
 207     _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
 208   }
 209 
 210   _result = opr;
 211 }
 212 
 213 void LIRItem::load_item() {
 214   if (result()->is_illegal()) {
 215     // update the items result
 216     _result = value()->operand();
 217   }
 218   if (!result()->is_register()) {
 219     LIR_Opr reg = _gen->new_register(value()->type());
 220     __ move(result(), reg);
 221     if (result()->is_constant()) {
 222       _result = reg;
 223     } else {
 224       set_result(reg);
 225     }
 226   }
 227 }
 228 
 229 
 230 void LIRItem::load_for_store(BasicType type) {
 231   if (_gen->can_store_as_constant(value(), type)) {
 232     _result = value()->operand();
 233     if (!_result->is_constant()) {

 589     assert(right_op != result_op, "malformed");
 590     __ move(left_op, result_op);
 591     left_op = result_op;
 592   }
 593 
 594   switch(code) {
 595     case Bytecodes::_iand:
 596     case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;
 597 
 598     case Bytecodes::_ior:
 599     case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;
 600 
 601     case Bytecodes::_ixor:
 602     case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;
 603 
 604     default: ShouldNotReachHere();
 605   }
 606 }
 607 
 608 
 609 void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
 610   if (!GenerateSynchronizationCode) return;
 611   // for slow path, use debug info for state after successful locking
 612   CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
 613   __ load_stack_address_monitor(monitor_no, lock);
 614   // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
 615   __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
 616 }
 617 
 618 
 619 void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
 620   if (!GenerateSynchronizationCode) return;
 621   // setup registers
 622   LIR_Opr hdr = lock;
 623   lock = new_hdr;
 624   CodeStub* slow_path = new MonitorExitStub(lock, !UseHeavyMonitors, monitor_no);
 625   __ load_stack_address_monitor(monitor_no, lock);
 626   __ unlock_object(hdr, object, lock, scratch, slow_path);
 627 }
 628 
 629 #ifndef PRODUCT
 630 void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
 631   if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
 632     tty->print_cr("   ###class not loaded at new bci %d", new_instance->printable_bci());
 633   } else if (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_jvmci() && new_instance->is_unresolved())) {
 634     tty->print_cr("   ###class not resolved at new bci %d", new_instance->printable_bci());
 635   }
 636 }
 637 #endif
 638 
 639 void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
 640   klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
  641   // If klass is not loaded, we do not know if the klass has finalizers:
 642   if (UseFastNewInstance && klass->is_loaded()
 643       && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
 644 
 645     Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;
 646 
 647     CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);
 648 
 649     assert(klass->is_loaded(), "must be loaded");
 650     // allocate space for instance
 651     assert(klass->size_helper() > 0, "illegal instance size");
 652     const int instance_size = align_object_size(klass->size_helper());
 653     __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
 654                        oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
 655   } else {
 656     CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
 657     __ branch(lir_cond_always, slow_path);
 658     __ branch_destination(slow_path->continuation());
 659   }
 660 }
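
For context, the layout-helper test above gates the inline allocation fast path: an instance layout helper packs the object size together with a low "slow path" bit that is set when allocation must go to the runtime (for example, when the class has a finalizer). A minimal sketch of the check, following the mainline Klass definition in klass.hpp:

    static bool layout_helper_needs_slow_path(jint lh) {
      assert(lh > (jint)_lh_neutral_value, "must be instance");
      return (lh & _lh_instance_slow_path_bit) != 0;
    }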
 661 
 662 
 663 static bool is_constant_zero(Instruction* inst) {
 664   IntConstant* c = inst->type()->as_IntConstant();
 665   if (c) {
 666     return (c->value() == 0);
 667   }
 668   return false;
 669 }
 670 
 671 
 672 static bool positive_constant(Instruction* inst) {
 673   IntConstant* c = inst->type()->as_IntConstant();
 674   if (c) {
 675     return (c->value() >= 0);
 676   }
 677   return false;

 737       if (src_type != NULL) {
 738         if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
 739           is_exact = true;
 740           expected_type = dst_type;
 741         }
 742       }
 743     }
 744     // at least pass along a good guess
 745     if (expected_type == NULL) expected_type = dst_exact_type;
 746     if (expected_type == NULL) expected_type = src_declared_type;
 747     if (expected_type == NULL) expected_type = dst_declared_type;
 748 
 749     src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
 750     dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
 751   }
 752 
 753   // if a probable array type has been identified, figure out if any
 754   // of the required checks for a fast case can be elided.
 755   int flags = LIR_OpArrayCopy::all_flags;
 756 
 757   if (!src_objarray)
 758     flags &= ~LIR_OpArrayCopy::src_objarray;
 759   if (!dst_objarray)
 760     flags &= ~LIR_OpArrayCopy::dst_objarray;
 761 
 762   if (!x->arg_needs_null_check(0))
 763     flags &= ~LIR_OpArrayCopy::src_null_check;
 764   if (!x->arg_needs_null_check(2))
 765     flags &= ~LIR_OpArrayCopy::dst_null_check;
 766 
 767 
 768   if (expected_type != NULL) {
 769     Value length_limit = NULL;
 770 
 771     IfOp* ifop = length->as_IfOp();
 772     if (ifop != NULL) {
 773       // look for expressions like min(v, a.length) which ends up as
 774       //   x > y ? y : x  or  x >= y ? y : x
 775       if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
 776           ifop->x() == ifop->fval() &&

1541       case T_FLOAT:
1542         if (c->as_jint_bits() != other->as_jint_bits()) continue;
1543         break;
1544       case T_LONG:
1545       case T_DOUBLE:
1546         if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
1547         if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
1548         break;
1549       case T_OBJECT:
1550         if (c->as_jobject() != other->as_jobject()) continue;
1551         break;
1552       default:
1553         break;
1554       }
1555       return _reg_for_constants.at(i);
1556     }
1557   }
1558 
1559   LIR_Opr result = new_register(t);
1560   __ move((LIR_Opr)c, result);
1561   _constants.append(c);
1562   _reg_for_constants.append(result);
1563   return result;
1564 }
1565 
1566 //------------------------field access--------------------------------------
1567 
1568 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
1569   assert(x->number_of_arguments() == 4, "wrong type");
1570   LIRItem obj   (x->argument_at(0), this);  // object
1571   LIRItem offset(x->argument_at(1), this);  // offset of field
1572   LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
1573   LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
1574   assert(obj.type()->tag() == objectTag, "invalid type");
1575   assert(cmp.type()->tag() == type->tag(), "invalid type");
1576   assert(val.type()->tag() == type->tag(), "invalid type");
1577 
1578   LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
1579                                             obj, offset, cmp, val);
1580   set_result(x, result);
1581 }
1582 
 1583 // Comment copied from templateTable_i486.cpp
 1584 // ----------------------------------------------------------------------------
 1585 // Volatile variables demand their effects be made known to all CPUs in

1637     // load item if field not constant
1638     // because of code patching we cannot inline constants
1639     if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1640       value.load_byte_item();
1641     } else  {
1642       value.load_item();
1643     }
1644   } else {
1645     value.load_for_store(field_type);
1646   }
1647 
1648   set_no_result(x);
1649 
1650 #ifndef PRODUCT
1651   if (PrintNotLoaded && needs_patching) {
1652     tty->print_cr("   ###class not loaded at store_%s bci %d",
1653                   x->is_static() ?  "static" : "field", x->printable_bci());
1654   }
1655 #endif
1656 
1657   if (x->needs_null_check() &&
1658       (needs_patching ||
1659        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1660     // Emit an explicit null check because the offset is too large.
1661     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1662     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1663     __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1664   }
1665 
1666   DecoratorSet decorators = IN_HEAP;
1667   if (is_volatile) {
1668     decorators |= MO_SEQ_CST;
1669   }
1670   if (needs_patching) {
1671     decorators |= C1_NEEDS_PATCHING;
1672   }
1673 
1674   access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
1675                   value.result(), info != NULL ? new CodeEmitInfo(info) : NULL, info);
1676 }
1677 
1678 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1679   assert(x->is_pinned(),"");
1680   bool needs_range_check = x->compute_needs_range_check();
1681   bool use_length = x->length() != NULL;
1682   bool obj_store = is_reference_type(x->elt_type());
1683   bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
1684                                          !get_jobject_constant(x->value())->is_null_object() ||
1685                                          x->should_profile());
1686 
1687   LIRItem array(x->array(), this);
1688   LIRItem index(x->index(), this);
1689   LIRItem value(x->value(), this);
1690   LIRItem length(this);
1691 
1692   array.load_item();
1693   index.load_nonconstant();
1694 
1695   if (use_length && needs_range_check) {
1696     length.set_instruction(x->length());
1697     length.load_item();
1698 
1699   }
1700   if (needs_store_check || x->check_boolean()) {
1701     value.load_item();
1702   } else {
1703     value.load_for_store(x->elt_type());
1704   }
1705 
1706   set_no_result(x);
1707 
1708   // the CodeEmitInfo must be duplicated for each different
1709   // LIR-instruction because spilling can occur anywhere between two
1710   // instructions and so the debug information must be different
1711   CodeEmitInfo* range_check_info = state_for(x);
1712   CodeEmitInfo* null_check_info = NULL;
1713   if (x->needs_null_check()) {
1714     null_check_info = new CodeEmitInfo(range_check_info);
1715   }
1716 
1717   if (GenerateRangeChecks && needs_range_check) {
1718     if (use_length) {
1719       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1720       __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
1721     } else {
1722       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1723       // range_check also does the null check
1724       null_check_info = NULL;
1725     }
1726   }
1727 
1728   if (GenerateArrayStoreCheck && needs_store_check) {
1729     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1730     array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1731   }
1732 
1733   DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1734   if (x->check_boolean()) {
1735     decorators |= C1_MASK_BOOLEAN;
1736   }
1737 
1738   access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
1739                   NULL, null_check_info);
1740 }
1741 
1742 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
1743                                   LIRItem& base, LIR_Opr offset, LIR_Opr result,
1744                                   CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
1745   decorators |= ACCESS_READ;
1746   LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
1747   if (access.is_raw()) {
1748     _barrier_set->BarrierSetC1::load_at(access, result);
1749   } else {
1750     _barrier_set->load_at(access, result);
1751   }
1752 }
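
A note on the dispatch idiom above, which recurs in all of the access_* helpers: the qualified call _barrier_set->BarrierSetC1::load_at(...) binds statically to the base-class implementation, so raw accesses bypass any GC-specific barrier override. A self-contained C++ sketch of the difference between the two call forms:

    #include <cstdio>

    struct Base           { virtual void f() { std::puts("Base::f"); } };
    struct Derived : Base { void f() override { std::puts("Derived::f"); } };

    int main() {
      Derived d;
      Base* p = &d;
      p->f();        // virtual dispatch: prints "Derived::f"
      p->Base::f();  // qualified call, no dispatch: prints "Base::f"
      return 0;
    }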
1753 
1754 void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
1755                                LIR_Opr addr, LIR_Opr result) {
1756   decorators |= ACCESS_READ;
1757   LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
1758   access.set_resolved_addr(addr);
1759   if (access.is_raw()) {

1800     return _barrier_set->BarrierSetC1::atomic_xchg_at(access, value);
1801   } else {
1802     return _barrier_set->atomic_xchg_at(access, value);
1803   }
1804 }
1805 
1806 LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type,
1807                                            LIRItem& base, LIRItem& offset, LIRItem& value) {
1808   decorators |= ACCESS_READ;
1809   decorators |= ACCESS_WRITE;
1810   // Atomic operations are SEQ_CST by default
1811   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
1812   LIRAccess access(this, decorators, base, offset, type);
1813   if (access.is_raw()) {
1814     return _barrier_set->BarrierSetC1::atomic_add_at(access, value);
1815   } else {
1816     return _barrier_set->atomic_add_at(access, value);
1817   }
1818 }
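
The access_atomic_cmpxchg_at helper called from do_CompareAndSwap earlier is not part of this excerpt, but it follows the same shape as access_atomic_add_at above; a sketch, assuming the mainline signature:

    LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
                                                   LIRItem& base, LIRItem& offset,
                                                   LIRItem& cmp_value, LIRItem& new_value) {
      decorators |= ACCESS_READ;
      decorators |= ACCESS_WRITE;
      // Atomic operations are SEQ_CST by default
      decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
      LIRAccess access(this, decorators, base, offset, type);
      if (access.is_raw()) {
        return _barrier_set->BarrierSetC1::atomic_cmpxchg_at(access, cmp_value, new_value);
      } else {
        return _barrier_set->atomic_cmpxchg_at(access, cmp_value, new_value);
      }
    }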
1819 
1820 void LIRGenerator::do_LoadField(LoadField* x) {
1821   bool needs_patching = x->needs_patching();
1822   bool is_volatile = x->field()->is_volatile();
1823   BasicType field_type = x->field_type();
1824 
1825   CodeEmitInfo* info = NULL;
1826   if (needs_patching) {
1827     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1828     info = state_for(x, x->state_before());
1829   } else if (x->needs_null_check()) {
1830     NullCheck* nc = x->explicit_null_check();
1831     if (nc == NULL) {
1832       info = state_for(x);
1833     } else {
1834       info = state_for(nc);
1835     }
1836   }
1837 
1838   LIRItem object(x->obj(), this);
1839 
1840   object.load_item();
1841 
1842 #ifndef PRODUCT
1843   if (PrintNotLoaded && needs_patching) {
1844     tty->print_cr("   ###class not loaded at load_%s bci %d",
1845                   x->is_static() ?  "static" : "field", x->printable_bci());
1846   }
1847 #endif
1848 
1849   bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1850   if (x->needs_null_check() &&
1851       (needs_patching ||
1852        MacroAssembler::needs_explicit_null_check(x->offset()) ||
1853        stress_deopt)) {
1854     LIR_Opr obj = object.result();
1855     if (stress_deopt) {
1856       obj = new_register(T_OBJECT);
1857       __ move(LIR_OprFact::oopConst(NULL), obj);
1858     }
1859     // Emit an explicit null check because the offset is too large.
1860     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1861     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1862     __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1863   }
1864 
1865   DecoratorSet decorators = IN_HEAP;
1866   if (is_volatile) {
1867     decorators |= MO_SEQ_CST;
1868   }
1869   if (needs_patching) {
1870     decorators |= C1_NEEDS_PATCHING;
1871   }
1872 
1873   LIR_Opr result = rlock_result(x, field_type);
1874   access_load_at(decorators, field_type,
1875                  object, LIR_OprFact::intConst(x->offset()), result,
1876                  info ? new CodeEmitInfo(info) : NULL, info);
1877 }
1878 
1879 // int/long jdk.internal.util.Preconditions.checkIndex
1880 void LIRGenerator::do_PreconditionsCheckIndex(Intrinsic* x, BasicType type) {
1881   assert(x->number_of_arguments() == 3, "wrong type");
1882   LIRItem index(x->argument_at(0), this);
1883   LIRItem length(x->argument_at(1), this);
1884   LIRItem oobef(x->argument_at(2), this);
1885 
1886   index.load_item();
1887   length.load_item();
1888   oobef.load_item();
1889 
1890   LIR_Opr result = rlock_result(x);
 1891   // x->state() is created from copy_state_for_exception; it does not contain the arguments,
 1892   // so we must prepare them here before entering the interpreter in case of deoptimization.
1893   ValueStack* state = x->state();
1894   for (int i = 0; i < x->number_of_arguments(); i++) {
1895     Value arg = x->argument_at(i);
1896     state->push(arg->type(), arg);

2001       __ move(LIR_OprFact::oopConst(NULL), obj);
2002       __ null_check(obj, new CodeEmitInfo(null_check_info));
2003     }
2004   }
2005 
2006   if (GenerateRangeChecks && needs_range_check) {
2007     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
2008       __ branch(lir_cond_always, new RangeCheckStub(range_check_info, index.result(), array.result()));
2009     } else if (use_length) {
2010       // TODO: use a (modified) version of array_range_check that does not require a
2011       //       constant length to be loaded to a register
2012       __ cmp(lir_cond_belowEqual, length.result(), index.result());
2013       __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
2014     } else {
2015       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
2016       // The range check performs the null check, so clear it out for the load
2017       null_check_info = NULL;
2018     }
2019   }
2020 
2021   DecoratorSet decorators = IN_HEAP | IS_ARRAY;
2022 
2023   LIR_Opr result = rlock_result(x, x->elt_type());
2024   access_load_at(decorators, x->elt_type(),
2025                  array, index.result(), result,
2026                  NULL, null_check_info);
2027 }
2028 
2029 
2030 void LIRGenerator::do_NullCheck(NullCheck* x) {
2031   if (x->can_trap()) {
2032     LIRItem value(x->obj(), this);
2033     value.load_item();
2034     CodeEmitInfo* info = state_for(x);
2035     __ null_check(value.result(), info);
2036   }
2037 }
2038 
2039 
2040 void LIRGenerator::do_TypeCast(TypeCast* x) {
2041   LIRItem value(x->obj(), this);
2042   value.load_item();
2043   // the result is the same as from the node we are casting
2044   set_result(x, value.result());
2045 }
2046 
2047 
2048 void LIRGenerator::do_Throw(Throw* x) {

2507   Compilation* comp = Compilation::current();
2508   if (do_update) {
2509     // try to find exact type, using CHA if possible, so that loading
2510     // the klass from the object can be avoided
2511     ciType* type = obj->exact_type();
2512     if (type == NULL) {
2513       type = obj->declared_type();
2514       type = comp->cha_exact_type(type);
2515     }
2516     assert(type == NULL || type->is_klass(), "type should be class");
2517     exact_klass = (type != NULL && type->is_loaded()) ? (ciKlass*)type : NULL;
2518 
2519     do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2520   }
2521 
2522   if (!do_null && !do_update) {
2523     return result;
2524   }
2525 
2526   ciKlass* exact_signature_k = NULL;
2527   if (do_update) {
2528     // Is the type from the signature exact (the only one possible)?
2529     exact_signature_k = signature_at_call_k->exact_klass();
2530     if (exact_signature_k == NULL) {
2531       exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2532     } else {
2533       result = exact_signature_k;
2534       // Known statically. No need to emit any code: prevent
2535       // LIR_Assembler::emit_profile_type() from emitting useless code
2536       profiled_k = ciTypeEntries::with_status(result, profiled_k);
2537     }
 2538     // exact_klass and exact_signature_k can both be non-NULL but
 2539     // different if exact_klass is loaded after the ciObject for
 2540     // exact_signature_k is created.
2541     if (exact_klass == NULL && exact_signature_k != NULL && exact_klass != exact_signature_k) {
2542       // sometimes the type of the signature is better than the best type
2543       // the compiler has
2544       exact_klass = exact_signature_k;
2545     }
2546     if (callee_signature_k != NULL &&
2547         callee_signature_k != signature_at_call_k) {

2592         assert(!src->is_illegal(), "check");
2593         BasicType t = src->type();
2594         if (is_reference_type(t)) {
2595           intptr_t profiled_k = parameters->type(j);
2596           Local* local = x->state()->local_at(java_index)->as_Local();
2597           ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
2598                                         in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
2599                                         profiled_k, local, mdp, false, local->declared_type()->as_klass(), NULL);
 2600           // If the profile is known statically, set it once and for all and do not emit any code
2601           if (exact != NULL) {
2602             md->set_parameter_type(j, exact);
2603           }
2604           j++;
2605         }
2606         java_index += type2size[t];
2607       }
2608     }
2609   }
2610 }
2611 
2612 void LIRGenerator::do_Base(Base* x) {
2613   __ std_entry(LIR_OprFact::illegalOpr);
2614   // Emit moves from physical registers / stack slots to virtual registers
2615   CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2616   IRScope* irScope = compilation()->hir()->top_scope();
2617   int java_index = 0;
2618   for (int i = 0; i < args->length(); i++) {
2619     LIR_Opr src = args->at(i);
2620     assert(!src->is_illegal(), "check");
2621     BasicType t = src->type();
2622 
 2623     // Types which are smaller than int are passed as int, so
 2624     // correct the type that was passed.
2625     switch (t) {
2626     case T_BYTE:
2627     case T_BOOLEAN:
2628     case T_SHORT:
2629     case T_CHAR:
2630       t = T_INT;
2631       break;

2672     }
2673     assert(obj->is_valid(), "must be valid");
2674 
2675     if (method()->is_synchronized() && GenerateSynchronizationCode) {
2676       LIR_Opr lock = syncLockOpr();
2677       __ load_stack_address_monitor(0, lock);
2678 
2679       CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
2680       CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2681 
2682       // receiver is guaranteed non-NULL so don't need CodeEmitInfo
2683       __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
2684     }
2685   }
2686   // increment invocation counters if needed
2687   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2688     profile_parameters(x);
2689     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
2690     increment_invocation_counter(info);
2691   }
 2692   }
2693   // all blocks with a successor must end with an unconditional jump
2694   // to the successor even if they are consecutive
2695   __ jump(x->default_sux());
2696 }
2697 
2698 
2699 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2700   // construct our frame and model the production of incoming pointer
2701   // to the OSR buffer.
2702   __ osr_entry(LIR_Assembler::osrBufferPointer());
2703   LIR_Opr result = rlock_result(x);
2704   __ move(LIR_Assembler::osrBufferPointer(), result);
2705 }
2706 
2707 
2708 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2709   assert(args->length() == arg_list->length(),
2710          "args=%d, arg_list=%d", args->length(), arg_list->length());
2711   for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
2712     LIRItem* param = args->at(i);
2713     LIR_Opr loc = arg_list->at(i);
2714     if (loc->is_register()) {
2715       param->load_item_force(loc);
2716     } else {
2717       LIR_Address* addr = loc->as_address_ptr();
2718       param->load_for_store(addr->type());
2719       if (addr->type() == T_OBJECT) {
2720         __ move_wide(param->result(), addr);
2721       } else
2722         __ move(param->result(), addr);
2723     }
2724   }
2725 
2726   if (x->has_receiver()) {
2727     LIRItem* receiver = args->at(0);
2728     LIR_Opr loc = arg_list->at(0);
2729     if (loc->is_register()) {
2730       receiver->load_item_force(loc);
2731     } else {
2732       assert(loc->is_address(), "just checking");
2733       receiver->load_for_store(T_OBJECT);
2734       __ move_wide(receiver->result(), loc->as_address_ptr());
2735     }
2736   }
2737 }
2738 
2739 
2740 // Visits all arguments, returns appropriate items without loading them
2741 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2742   LIRItemList* argument_items = new LIRItemList();
2743   if (x->has_receiver()) {

2869   __ move(tmp, reg);
2870 }
2871 
2872 
2873 
2874 // Code for  :  x->x() {x->cond()} x->y() ? x->tval() : x->fval()
2875 void LIRGenerator::do_IfOp(IfOp* x) {
2876 #ifdef ASSERT
2877   {
2878     ValueTag xtag = x->x()->type()->tag();
2879     ValueTag ttag = x->tval()->type()->tag();
2880     assert(xtag == intTag || xtag == objectTag, "cannot handle others");
2881     assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
2882     assert(ttag == x->fval()->type()->tag(), "cannot handle others");
2883   }
2884 #endif
2885 
2886   LIRItem left(x->x(), this);
2887   LIRItem right(x->y(), this);
2888   left.load_item();
2889   if (can_inline_as_constant(right.value())) {
2890     right.dont_load_item();
2891   } else {
2892     right.load_item();
2893   }
2894 
2895   LIRItem t_val(x->tval(), this);
2896   LIRItem f_val(x->fval(), this);
2897   t_val.dont_load_item();
2898   f_val.dont_load_item();
2899   LIR_Opr reg = rlock_result(x);
2900 
2901   __ cmp(lir_cond(x->cond()), left.result(), right.result());
2902   __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
2903 }
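
As a concrete instance of the pattern above: a Java ternary such as x > y ? y : x (i.e. Math.min) lowers to a compare followed by a conditional move, roughly:

    // illustrative LIR for "x > y ? y : x":
    //   cmp   greater [x] [y]
    //   cmove greater [y] [x] -> [reg]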
2904 
2905 void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {
2906   assert(x->number_of_arguments() == 0, "wrong type");
2907   // Enforce computation of _reserved_argument_area_size which is required on some platforms.
2908   BasicTypeList signature;
2909   CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2910   LIR_Opr reg = result_register_for(x->type());
2911   __ call_runtime_leaf(routine, getThreadTemp(),
2912                        reg, new LIR_OprList());
2913   LIR_Opr result = rlock_result(x);
2914   __ move(reg, result);
2915 }
2916 
2917 
2918 
2919 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
2920   switch (x->id()) {
2921   case vmIntrinsics::_intBitsToFloat      :
2922   case vmIntrinsics::_doubleToRawLongBits :

3152   if (x->recv() != NULL || x->nb_profiled_args() > 0) {
3153     profile_parameters_at_call(x);
3154   }
3155 
3156   if (x->recv() != NULL) {
3157     LIRItem value(x->recv(), this);
3158     value.load_item();
3159     recv = new_register(T_OBJECT);
3160     __ move(value.result(), recv);
3161   }
3162   __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3163 }
3164 
3165 void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
3166   int bci = x->bci_of_invoke();
3167   ciMethodData* md = x->method()->method_data_or_null();
3168   assert(md != NULL, "Sanity");
3169   ciProfileData* data = md->bci_to_data(bci);
3170   if (data != NULL) {
3171     assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
3172     ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
3173     LIR_Opr mdp = LIR_OprFact::illegalOpr;
3174 
3175     bool ignored_will_link;
3176     ciSignature* signature_at_call = NULL;
3177     x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3178 
3179     // The offset within the MDO of the entry to update may be too large
3180     // to be used in load/store instructions on some platforms. So have
3181     // profile_type() compute the address of the profile in a register.
3182     ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
3183         ret->type(), x->ret(), mdp,
3184         !x->needs_null_check(),
3185         signature_at_call->return_type()->as_klass(),
3186         x->callee()->signature()->return_type()->as_klass());
3187     if (exact != NULL) {
3188       md->set_return_type(bci, exact);
3189     }
3190   }
3191 }
3192 
3193 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
 3194   // We can safely ignore accessors here, since C2 will inline them anyway;
 3195   // accessors are also always mature.
3196   if (!x->inlinee()->is_accessor()) {
3197     CodeEmitInfo* info = state_for(x, x->state(), true);
3198     // Notify the runtime very infrequently only to take care of counter overflows
3199     int freq_log = Tier23InlineeNotifyFreqLog;
3200     double scale;
3201     if (_method->has_option_value(CompileCommand::CompileThresholdScaling, scale)) {
3202       freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
3203     }
3204     increment_event_counter_impl(info, x->inlinee(), LIR_OprFact::intConst(InvocationCounter::count_increment), right_n_bits(freq_log), InvocationEntryBci, false, true);
3205   }
3206 }
3207 
3208 void LIRGenerator::increment_backedge_counter_conditionally(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info, int left_bci, int right_bci, int bci) {
3209   if (compilation()->is_profiling()) {
3210 #if defined(X86) && !defined(_LP64)
3211     // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
3212     LIR_Opr left_copy = new_register(left->type());

src/hotspot/share/c1/c1_LIRGenerator.cpp (new)

  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Compilation.hpp"
  27 #include "c1/c1_Defs.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciFlatArrayKlass.hpp"
  35 #include "ci/ciInlineKlass.hpp"
  36 #include "ci/ciInstance.hpp"
  37 #include "ci/ciObjArray.hpp"
  38 #include "ci/ciUtilities.hpp"
  39 #include "compiler/compilerDefinitions.inline.hpp"
  40 #include "gc/shared/barrierSet.hpp"
  41 #include "gc/shared/c1/barrierSetC1.hpp"
  42 #include "oops/klass.inline.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "runtime/stubRoutines.hpp"
  45 #include "runtime/vm_version.hpp"
  46 #include "utilities/bitMap.inline.hpp"
  47 #include "utilities/macros.hpp"
  48 #include "utilities/powerOfTwo.hpp"
  49 
  50 #ifdef ASSERT
  51 #define __ gen()->lir(__FILE__, __LINE__)->
  52 #else
  53 #define __ gen()->lir()->
  54 #endif
  55 

 196   ResolveNode* source = source_node(src);
 197   source->append(destination_node(dest));
 198 }
 199 
 200 
 201 //--------------------------------------------------------------
 202 // LIRItem
 203 
 204 void LIRItem::set_result(LIR_Opr opr) {
 205   assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
 206   value()->set_operand(opr);
 207 
 208   if (opr->is_virtual()) {
 209     _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
 210   }
 211 
 212   _result = opr;
 213 }
 214 
 215 void LIRItem::load_item() {
 216   assert(!_gen->in_conditional_code(), "LIRItem cannot be loaded in conditional code");
 217 
 218   if (result()->is_illegal()) {
 219     // update the items result
 220     _result = value()->operand();
 221   }
 222   if (!result()->is_register()) {
 223     LIR_Opr reg = _gen->new_register(value()->type());
 224     __ move(result(), reg);
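          // For a constant, only the item-local _result is redirected to the new
          // register below: the instruction's operand stays the constant so other
          // uses can still inline it. Otherwise set_result() rebinds the operand too.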
 225     if (result()->is_constant()) {
 226       _result = reg;
 227     } else {
 228       set_result(reg);
 229     }
 230   }
 231 }
 232 
 233 
 234 void LIRItem::load_for_store(BasicType type) {
 235   if (_gen->can_store_as_constant(value(), type)) {
 236     _result = value()->operand();
 237     if (!_result->is_constant()) {

 593     assert(right_op != result_op, "malformed");
 594     __ move(left_op, result_op);
 595     left_op = result_op;
 596   }
 597 
 598   switch(code) {
 599     case Bytecodes::_iand:
 600     case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;
 601 
 602     case Bytecodes::_ior:
 603     case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;
 604 
 605     case Bytecodes::_ixor:
 606     case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;
 607 
 608     default: ShouldNotReachHere();
 609   }
 610 }
 611 
 612 
 613 void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no,
 614                                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info, CodeStub* throw_imse_stub) {
 615   if (!GenerateSynchronizationCode) return;
 616   // for slow path, use debug info for state after successful locking
 617   CodeStub* slow_path = new MonitorEnterStub(object, lock, info, throw_imse_stub, scratch);
 618   __ load_stack_address_monitor(monitor_no, lock);
 619   // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
 620   __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception, throw_imse_stub);
 621 }
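
The throw_imse_stub parameter added here threads an IllegalMonitorStateException stub through to lock_object; this excerpt does not show where the stub is built, but the natural reading is that it covers the Valhalla rule that inline-type instances cannot be used as monitors.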
 622 
 623 
 624 void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
 625   if (!GenerateSynchronizationCode) return;
 626   // setup registers
 627   LIR_Opr hdr = lock;
 628   lock = new_hdr;
 629   CodeStub* slow_path = new MonitorExitStub(lock, !UseHeavyMonitors, monitor_no);
 630   __ load_stack_address_monitor(monitor_no, lock);
 631   __ unlock_object(hdr, object, lock, scratch, slow_path);
 632 }
 633 
 634 #ifndef PRODUCT
 635 void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
 636   if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
 637     tty->print_cr("   ###class not loaded at new bci %d", new_instance->printable_bci());
 638   } else if (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_jvmci() && new_instance->is_unresolved())) {
 639     tty->print_cr("   ###class not resolved at new bci %d", new_instance->printable_bci());
 640   }
 641 }
 642 #endif
 643 
 644 void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, bool allow_inline, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
 645   if (allow_inline) {
 646     assert(!is_unresolved && klass->is_loaded(), "inline type klass should be resolved");
 647     __ metadata2reg(klass->constant_encoding(), klass_reg);
 648   } else {
 649     klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
 650   }
 651   // If klass is not loaded, we do not know if the klass has finalizers or is an unexpected inline klass.
 652   if (UseFastNewInstance && klass->is_loaded() && (allow_inline || !klass->is_inlinetype())
 653       && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
 654 
 655     Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;
 656 
 657     CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);
 658 
 659     assert(klass->is_loaded(), "must be loaded");
 660     // allocate space for instance
 661     assert(klass->size_helper() > 0, "illegal instance size");
 662     const int instance_size = align_object_size(klass->size_helper());
 663     __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
 664                        oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
 665   } else {
 666     CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, allow_inline ? Runtime1::new_instance_id : Runtime1::new_instance_no_inline_id);
 667     __ jump(slow_path);
 668     __ branch_destination(slow_path->continuation());
 669   }
 670 }
 671 
 672 
 673 static bool is_constant_zero(Instruction* inst) {
 674   IntConstant* c = inst->type()->as_IntConstant();
 675   if (c) {
 676     return (c->value() == 0);
 677   }
 678   return false;
 679 }
 680 
 681 
 682 static bool positive_constant(Instruction* inst) {
 683   IntConstant* c = inst->type()->as_IntConstant();
 684   if (c) {
 685     return (c->value() >= 0);
 686   }
 687   return false;

 747       if (src_type != NULL) {
 748         if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
 749           is_exact = true;
 750           expected_type = dst_type;
 751         }
 752       }
 753     }
 754     // at least pass along a good guess
 755     if (expected_type == NULL) expected_type = dst_exact_type;
 756     if (expected_type == NULL) expected_type = src_declared_type;
 757     if (expected_type == NULL) expected_type = dst_declared_type;
 758 
 759     src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
 760     dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
 761   }
 762 
 763   // if a probable array type has been identified, figure out if any
 764   // of the required checks for a fast case can be elided.
 765   int flags = LIR_OpArrayCopy::all_flags;
 766 
 767   if (!src->is_loaded_flattened_array() && !dst->is_loaded_flattened_array()) {
 768     flags &= ~LIR_OpArrayCopy::always_slow_path;
 769   }
 770   if (!src->maybe_flattened_array()) {
 771     flags &= ~LIR_OpArrayCopy::src_inlinetype_check;
 772   }
 773   if (!dst->maybe_flattened_array() && !dst->maybe_null_free_array()) {
 774     flags &= ~LIR_OpArrayCopy::dst_inlinetype_check;
 775   }
 776 
 777   if (!src_objarray)
 778     flags &= ~LIR_OpArrayCopy::src_objarray;
 779   if (!dst_objarray)
 780     flags &= ~LIR_OpArrayCopy::dst_objarray;
 781 
 782   if (!x->arg_needs_null_check(0))
 783     flags &= ~LIR_OpArrayCopy::src_null_check;
 784   if (!x->arg_needs_null_check(2))
 785     flags &= ~LIR_OpArrayCopy::dst_null_check;
 786 
 787 
 788   if (expected_type != NULL) {
 789     Value length_limit = NULL;
 790 
 791     IfOp* ifop = length->as_IfOp();
 792     if (ifop != NULL) {
 793       // look for expressions like min(v, a.length), which end up as
 794       //   x > y ? y : x  or  x >= y ? y : x
 795       if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
 796           ifop->x() == ifop->fval() &&

1561       case T_FLOAT:
1562         if (c->as_jint_bits() != other->as_jint_bits()) continue;
1563         break;
1564       case T_LONG:
1565       case T_DOUBLE:
1566         if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
1567         if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
1568         break;
1569       case T_OBJECT:
1570         if (c->as_jobject() != other->as_jobject()) continue;
1571         break;
1572       default:
1573         break;
1574       }
1575       return _reg_for_constants.at(i);
1576     }
1577   }
1578 
1579   LIR_Opr result = new_register(t);
1580   __ move((LIR_Opr)c, result);
1581   if (!in_conditional_code()) {
1582     _constants.append(c);
1583     _reg_for_constants.append(result);
1584   }
1585   return result;
1586 }
1587 
1588 void LIRGenerator::set_in_conditional_code(bool v) {
1589   assert(v != _in_conditional_code, "must change state");
1590   _in_conditional_code = v;
1591 }
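
The in_conditional_code() guard added to load_constant above exists because cached constants are assumed to dominate all later uses; a constant materialized on a conditionally executed path must therefore not enter the _reg_for_constants cache. The intended protocol, as used by access_sub_element below, looks roughly like:

    set_in_conditional_code(true);                // entering branch-local code
    LIR_Opr dflt = load_constant(default_value);  // materialized but not cached
    __ move(dflt, result);
    __ branch_destination(L_end->label());
    set_in_conditional_code(false);               // back on the main path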
1592 
1593 
1594 //------------------------field access--------------------------------------
1595 
1596 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
1597   assert(x->number_of_arguments() == 4, "wrong type");
1598   LIRItem obj   (x->argument_at(0), this);  // object
1599   LIRItem offset(x->argument_at(1), this);  // offset of field
1600   LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
1601   LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
1602   assert(obj.type()->tag() == objectTag, "invalid type");
1603   assert(cmp.type()->tag() == type->tag(), "invalid type");
1604   assert(val.type()->tag() == type->tag(), "invalid type");
1605 
1606   LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
1607                                             obj, offset, cmp, val);
1608   set_result(x, result);
1609 }
1610 
 1611 // Comment copied from templateTable_i486.cpp
 1612 // ----------------------------------------------------------------------------
 1613 // Volatile variables demand their effects be made known to all CPUs in

1665     // load item if field not constant
1666     // because of code patching we cannot inline constants
1667     if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1668       value.load_byte_item();
1669     } else  {
1670       value.load_item();
1671     }
1672   } else {
1673     value.load_for_store(field_type);
1674   }
1675 
1676   set_no_result(x);
1677 
1678 #ifndef PRODUCT
1679   if (PrintNotLoaded && needs_patching) {
1680     tty->print_cr("   ###class not loaded at store_%s bci %d",
1681                   x->is_static() ?  "static" : "field", x->printable_bci());
1682   }
1683 #endif
1684 
1685   if (!inline_type_field_access_prolog(x)) {
1686     // Field store will always deopt due to unloaded field or holder klass
1687     return;
1688   }
1689 
1690   if (x->needs_null_check() &&
1691       (needs_patching ||
1692        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1693     // Emit an explicit null check because the offset is too large.
1694     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1695     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1696     __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1697   }
1698 
1699   DecoratorSet decorators = IN_HEAP;
1700   if (is_volatile) {
1701     decorators |= MO_SEQ_CST;
1702   }
1703   if (needs_patching) {
1704     decorators |= C1_NEEDS_PATCHING;
1705   }
1706 
1707   access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
1708                   value.result(), info != NULL ? new CodeEmitInfo(info) : NULL, info);
1709 }
1710 
1711 // FIXME -- I can't find any other way to pass an address to access_load_at().
1712 class TempResolvedAddress: public Instruction {
1713  public:
1714   TempResolvedAddress(ValueType* type, LIR_Opr addr) : Instruction(type) {
1715     set_operand(addr);
1716   }
1717   virtual void input_values_do(ValueVisitor*) {}
1718   virtual void visit(InstructionVisitor* v)   {}
1719   virtual const char* name() const  { return "TempResolvedAddress"; }
1720 };
1721 
1722 LIR_Opr LIRGenerator::get_and_load_element_address(LIRItem& array, LIRItem& index) {
1723   ciType* array_type = array.value()->declared_type();
1724   ciFlatArrayKlass* flat_array_klass = array_type->as_flat_array_klass();
1725   assert(flat_array_klass->is_loaded(), "must be");
1726 
1727   int array_header_size = flat_array_klass->array_header_in_bytes();
1728   int shift = flat_array_klass->log2_element_size();
1729 
1730 #ifndef _LP64
1731   LIR_Opr index_op = new_register(T_INT);
 1732   // FIXME -- on 32-bit, the shift below can overflow, so we need to check
 1733   // that the top (shift+1) bits of the index are zero, or else throw
 1734   // ArrayIndexOutOfBoundsException
1735   if (index.result()->is_constant()) {
1736     jint const_index = index.result()->as_jint();
1737     __ move(LIR_OprFact::intConst(const_index << shift), index_op);
1738   } else {
 1739     __ shift_left(index.result(), shift, index_op);
1740   }
1741 #else
1742   LIR_Opr index_op = new_register(T_LONG);
1743   if (index.result()->is_constant()) {
1744     jint const_index = index.result()->as_jint();
 1745     __ move(LIR_OprFact::longConst((jlong)const_index << shift), index_op);
1746   } else {
1747     __ convert(Bytecodes::_i2l, index.result(), index_op);
1748     // Need to shift manually, as LIR_Address can scale only up to 3.
1749     __ shift_left(index_op, shift, index_op);
1750   }
1751 #endif
1752 
1753   LIR_Opr elm_op = new_pointer_register();
1754   LIR_Address* elm_address = generate_address(array.result(), index_op, 0, array_header_size, T_ADDRESS);
1755   __ leal(LIR_OprFact::address(elm_address), elm_op);
1756   return elm_op;
1757 }
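
A worked example of the computation above, with hypothetical numbers: for a flat array whose array_header_in_bytes() is 16 and whose log2_element_size() is 3, element i lives at array + 16 + ((jlong)i << 3), so on 64-bit the emitted LIR is essentially:

    //   convert    i2l [index]             -> [index_op]
    //   shift_left [index_op], 3           -> [index_op]
    //   leal       [array + index_op + 16] -> [elm_op]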
1758 
1759 void LIRGenerator::access_sub_element(LIRItem& array, LIRItem& index, LIR_Opr& result, ciField* field, int sub_offset) {
1760   assert(field != NULL, "Need a subelement type specified");
1761 
1762   // Find the starting address of the source (inside the array)
1763   LIR_Opr elm_op = get_and_load_element_address(array, index);
1764 
1765   BasicType subelt_type = field->type()->basic_type();
1766   TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(subelt_type), elm_op);
1767   LIRItem elm_item(elm_resolved_addr, this);
1768 
1769   DecoratorSet decorators = IN_HEAP;
1770   access_load_at(decorators, subelt_type,
1771                      elm_item, LIR_OprFact::intConst(sub_offset), result,
1772                      NULL, NULL);
1773 
1774   if (field->is_null_free()) {
1775     assert(field->type()->is_loaded(), "Must be");
1776     assert(field->type()->is_inlinetype(), "Must be if loaded");
1777     assert(field->type()->as_inline_klass()->is_initialized(), "Must be");
1778     LabelObj* L_end = new LabelObj();
1779     __ cmp(lir_cond_notEqual, result, LIR_OprFact::oopConst(NULL));
1780     __ branch(lir_cond_notEqual, L_end->label());
1781     set_in_conditional_code(true);
1782     Constant* default_value = new Constant(new InstanceConstant(field->type()->as_inline_klass()->default_instance()));
1783     if (default_value->is_pinned()) {
1784       __ move(LIR_OprFact::value_type(default_value->type()), result);
1785     } else {
1786       __ move(load_constant(default_value), result);
1787     }
1788     __ branch_destination(L_end->label());
1789     set_in_conditional_code(false);
1790   }
1791 }
1792 
1793 void LIRGenerator::access_flattened_array(bool is_load, LIRItem& array, LIRItem& index, LIRItem& obj_item,
1794                                           ciField* field, int sub_offset) {
1795   assert(sub_offset == 0 || field != NULL, "Sanity check");
1796 
1797   // Find the starting address of the source (inside the array)
1798   LIR_Opr elm_op = get_and_load_element_address(array, index);
1799 
1800   ciInlineKlass* elem_klass = NULL;
1801   if (field != NULL) {
1802     elem_klass = field->type()->as_inline_klass();
1803   } else {
1804     elem_klass = array.value()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass();
1805   }
1806   for (int i = 0; i < elem_klass->nof_nonstatic_fields(); i++) {
1807     ciField* inner_field = elem_klass->nonstatic_field_at(i);
1808     assert(!inner_field->is_flattened(), "flattened fields must have been expanded");
1809     int obj_offset = inner_field->offset();
1810     int elm_offset = obj_offset - elem_klass->first_field_offset() + sub_offset; // object header is not stored in array.
1811     BasicType field_type = inner_field->type()->basic_type();
1812 
1813     // Types which are smaller than int are still passed in an int register.
1814     BasicType reg_type = field_type;
1815     switch (reg_type) {
1816     case T_BYTE:
1817     case T_BOOLEAN:
1818     case T_SHORT:
1819     case T_CHAR:
1820       reg_type = T_INT;
1821       break;
1822     default:
1823       break;
1824     }
1825 
1826     LIR_Opr temp = new_register(reg_type);
1827     TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(field_type), elm_op);
1828     LIRItem elm_item(elm_resolved_addr, this);
1829 
1830     DecoratorSet decorators = IN_HEAP;
1831     if (is_load) {
1832       access_load_at(decorators, field_type,
1833                      elm_item, LIR_OprFact::intConst(elm_offset), temp,
1834                      NULL, NULL);
1835       access_store_at(decorators, field_type,
1836                       obj_item, LIR_OprFact::intConst(obj_offset), temp,
1837                       NULL, NULL);
1838     } else {
1839       access_load_at(decorators, field_type,
1840                      obj_item, LIR_OprFact::intConst(obj_offset), temp,
1841                      NULL, NULL);
1842       access_store_at(decorators, field_type,
1843                       elm_item, LIR_OprFact::intConst(elm_offset), temp,
1844                       NULL, NULL);
1845     }
1846   }
1847 }
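
To make the offset arithmetic in the loop above concrete, a hypothetical layout: an inline klass with two int fields at object offsets 12 and 16, first_field_offset() == 12, copied with sub_offset == 0. A load (is_load == true) then expands into two load/store pairs:

    //   field 0: elm_offset = 12 - 12 + 0 = 0,  obj_offset = 12
    //            load [elm_op + 0] -> temp;  store temp -> [obj + 12]
    //   field 1: elm_offset = 16 - 12 + 0 = 4,  obj_offset = 16
    //            load [elm_op + 4] -> temp;  store temp -> [obj + 16]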
1848 
1849 void LIRGenerator::check_flattened_array(LIR_Opr array, LIR_Opr value, CodeStub* slow_path) {
1850   LIR_Opr tmp = new_register(T_METADATA);
1851   __ check_flattened_array(array, value, tmp, slow_path);
1852 }
1853 
1854 void LIRGenerator::check_null_free_array(LIRItem& array, LIRItem& value, CodeEmitInfo* info) {
1855   LabelObj* L_end = new LabelObj();
1856   LIR_Opr tmp = new_register(T_METADATA);
1857   __ check_null_free_array(array.result(), tmp);
1858   __ branch(lir_cond_equal, L_end->label());
1859   __ null_check(value.result(), info);
1860   __ branch_destination(L_end->label());
1861 }
1862 
1863 bool LIRGenerator::needs_flattened_array_store_check(StoreIndexed* x) {
1864   if (x->elt_type() == T_OBJECT && x->array()->maybe_flattened_array()) {
1865     ciType* type = x->value()->declared_type();
1866     if (type != NULL && type->is_klass()) {
1867       ciKlass* klass = type->as_klass();
1868       if (!klass->can_be_inline_klass() || (klass->is_inlinetype() && !klass->as_inline_klass()->flatten_array())) {
1869         // This is known to be a non-flattened object. If the array is flattened,
1870         // it will be caught by the code generated by array_store_check().
1871         return false;
1872       }
1873     }
1874     // We're not 100% sure, so let's do the flattened_array_store_check.
1875     return true;
1876   }
1877   return false;
1878 }
1879 
1880 bool LIRGenerator::needs_null_free_array_store_check(StoreIndexed* x) {
1881   return x->elt_type() == T_OBJECT && x->array()->maybe_null_free_array();
1882 }
1883 
1884 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1885   assert(x->is_pinned(),"");
1886   assert(x->elt_type() != T_ARRAY, "never used");
1887   bool is_loaded_flattened_array = x->array()->is_loaded_flattened_array();
1888   bool needs_range_check = x->compute_needs_range_check();
1889   bool use_length = x->length() != NULL;
1890   bool obj_store = is_reference_type(x->elt_type());
1891   bool needs_store_check = obj_store && !(is_loaded_flattened_array && x->is_exact_flattened_array_store()) &&
1892                                         (x->value()->as_Constant() == NULL ||
1893                                          !get_jobject_constant(x->value())->is_null_object());
1894 
1895   LIRItem array(x->array(), this);
1896   LIRItem index(x->index(), this);
1897   LIRItem value(x->value(), this);
1898   LIRItem length(this);
1899 
1900   array.load_item();
1901   index.load_nonconstant();
1902 
1903   if (use_length && needs_range_check) {
1904     length.set_instruction(x->length());
1905     length.load_item();
1906   }
1907 
1908   if (needs_store_check || x->check_boolean()
1909       || is_loaded_flattened_array || needs_flattened_array_store_check(x) || needs_null_free_array_store_check(x)) {
1910     value.load_item();
1911   } else {
1912     value.load_for_store(x->elt_type());
1913   }
1914 
1915   set_no_result(x);
1916 
1917   // the CodeEmitInfo must be duplicated for each different
1918   // LIR-instruction because spilling can occur anywhere between two
1919   // instructions and so the debug information must be different
1920   CodeEmitInfo* range_check_info = state_for(x);
1921   CodeEmitInfo* null_check_info = NULL;
1922   if (x->needs_null_check()) {
1923     null_check_info = new CodeEmitInfo(range_check_info);
1924   }
1925 
1926   if (GenerateRangeChecks && needs_range_check) {
1927     if (use_length) {
1928       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1929       __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
1930     } else {
1931       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1932       // range_check also does the null check
1933       null_check_info = NULL;
1934     }
1935   }
1936 
1937   if (x->should_profile()) {
1938     if (x->array()->is_loaded_flattened_array()) {
1939       // No need to profile a store to a flattened array of known type. This can happen if
1940       // the type only became known after optimizations (for example, after the PhiSimplifier).
1941       x->set_should_profile(false);
1942     } else {
1943       ciMethodData* md = NULL;
1944       ciArrayLoadStoreData* load_store = NULL;
1945       profile_array_type(x, md, load_store);
1946       if (x->array()->maybe_null_free_array()) {
1947         profile_null_free_array(array, md, load_store);
1948       }
1949       profile_element_type(x->value(), md, load_store);
1950     }
1951   }
1952 
1953   if (GenerateArrayStoreCheck && needs_store_check) {
1954     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1955     array_store_check(value.result(), array.result(), store_check_info, NULL, -1);
1956   }
1957 
1958   if (is_loaded_flattened_array) {
1959     if (!x->value()->is_null_free()) {
1960       __ null_check(value.result(), new CodeEmitInfo(range_check_info));
1961     }
1962     // If the array element is an empty inline type, there is nothing to copy
1963     if (!x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass()->is_empty()) {
1964       access_flattened_array(false, array, index, value);
1965     }
1966   } else {
1967     StoreFlattenedArrayStub* slow_path = NULL;
1968 
1969     if (needs_flattened_array_store_check(x)) {
1970       // Check at runtime whether the array is indeed flattened
1971       index.load_item();
1972       slow_path = new StoreFlattenedArrayStub(array.result(), index.result(), value.result(), state_for(x, x->state_before()));
1973       check_flattened_array(array.result(), value.result(), slow_path);
1974       set_in_conditional_code(true);
1975     } else if (needs_null_free_array_store_check(x)) {
1976       CodeEmitInfo* info = new CodeEmitInfo(range_check_info);
1977       check_null_free_array(array, value, info);
1978     }
1979 
1980     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1981     if (x->check_boolean()) {
1982       decorators |= C1_MASK_BOOLEAN;
1983     }
1984 
1985     access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
1986                     NULL, null_check_info);
1987     if (slow_path != NULL) {
1988       __ branch_destination(slow_path->continuation());
1989       set_in_conditional_code(false);
1990     }
1991   }
1992 }
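
// For the maybe-flattened case above, the emitted code has roughly this shape:
//
//   check_flattened_array  --(is flattened)--> StoreFlattenedArrayStub
//   <ordinary access_store_at store>
//   slow_path->continuation():   // slow path re-joins here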
1993 
1994 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
1995                                   LIRItem& base, LIR_Opr offset, LIR_Opr result,
1996                                   CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
1997   decorators |= ACCESS_READ;
1998   LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
1999   if (access.is_raw()) {
2000     _barrier_set->BarrierSetC1::load_at(access, result);
2001   } else {
2002     _barrier_set->load_at(access, result);
2003   }
2004 }
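
// Minimal caller sketch (the pattern used by do_LoadField() below): load an
// int-sized field at a constant offset, with an optional implicit null check:
//
//   LIR_Opr result = rlock_result(x, T_INT);
//   access_load_at(IN_HEAP, T_INT, object, LIR_OprFact::intConst(x->offset()),
//                  result, NULL /* no patching */, info);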
2005 
2006 void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
2007                                LIR_Opr addr, LIR_Opr result) {
2008   decorators |= ACCESS_READ;
2009   LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
2010   access.set_resolved_addr(addr);
2011   if (access.is_raw()) {

2052     return _barrier_set->BarrierSetC1::atomic_xchg_at(access, value);
2053   } else {
2054     return _barrier_set->atomic_xchg_at(access, value);
2055   }
2056 }
2057 
2058 LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type,
2059                                            LIRItem& base, LIRItem& offset, LIRItem& value) {
2060   decorators |= ACCESS_READ;
2061   decorators |= ACCESS_WRITE;
2062   // Atomic operations are SEQ_CST by default
2063   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
2064   LIRAccess access(this, decorators, base, offset, type);
2065   if (access.is_raw()) {
2066     return _barrier_set->BarrierSetC1::atomic_add_at(access, value);
2067   } else {
2068     return _barrier_set->atomic_add_at(access, value);
2069   }
2070 }
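
// Note the defaulting above: a caller that passes no MO_* decorator, e.g.
//
//   access_atomic_add_at(IN_HEAP, T_INT, base, offset, value);
//
// effectively runs with IN_HEAP | ACCESS_READ | ACCESS_WRITE | MO_SEQ_CST.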
2071 
2072 bool LIRGenerator::inline_type_field_access_prolog(AccessField* x) {
2073   ciField* field = x->field();
2074   assert(!field->is_flattened(), "Flattened field access should have been expanded");
2075   if (!field->is_null_free()) {
2076     return true; // Not an inline type field
2077   }
2078   // Deoptimize if the access is non-static and requires patching (holder not loaded
2079   // or not accessible) because then we only have partial field information and the
2080   // field could be flattened (see ciField constructor).
2081   bool could_be_flat = !x->is_static() && x->needs_patching();
2082   // Deoptimize if we load from a static field with an uninitialized type because we
2083   // need to throw an exception if initialization of the type failed.
2084   bool not_initialized = x->is_static() && x->as_LoadField() != NULL &&
2085       !field->type()->as_instance_klass()->is_initialized();
2086   if (could_be_flat || not_initialized) {
2087     CodeEmitInfo* info = state_for(x, x->state_before());
2088     CodeStub* stub = new DeoptimizeStub(new CodeEmitInfo(info),
2089                                         Deoptimization::Reason_unloaded,
2090                                         Deoptimization::Action_make_not_entrant);
2091     __ jump(stub);
2092     return false;
2093   }
2094   return true;
2095 }
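
// Callers must bail out when this returns false, because a deoptimizing jump
// has already been emitted; see do_LoadField() below:
//
//   if (!inline_type_field_access_prolog(x)) {
//     LIR_Opr result = rlock_result(x, field_type);   // dummy result
//     __ move(LIR_OprFact::oopConst(NULL), result);
//     return;
//   }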
2096 
2097 void LIRGenerator::do_LoadField(LoadField* x) {
2098   bool needs_patching = x->needs_patching();
2099   bool is_volatile = x->field()->is_volatile();
2100   BasicType field_type = x->field_type();
2101 
2102   CodeEmitInfo* info = NULL;
2103   if (needs_patching) {
2104     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
2105     info = state_for(x, x->state_before());
2106   } else if (x->needs_null_check()) {
2107     NullCheck* nc = x->explicit_null_check();
2108     if (nc == NULL) {
2109       info = state_for(x);
2110     } else {
2111       info = state_for(nc);
2112     }
2113   }
2114 
2115   LIRItem object(x->obj(), this);
2116 
2117   object.load_item();
2118 
2119 #ifndef PRODUCT
2120   if (PrintNotLoaded && needs_patching) {
2121     tty->print_cr("   ###class not loaded at load_%s bci %d",
2122                   x->is_static() ?  "static" : "field", x->printable_bci());
2123   }
2124 #endif
2125 
2126   if (!inline_type_field_access_prolog(x)) {
2127     // The field load will always deopt due to an unloaded field or holder klass
2128     LIR_Opr result = rlock_result(x, field_type);
2129     __ move(LIR_OprFact::oopConst(NULL), result);
2130     return;
2131   }
2132 
2133   bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
2134   if (x->needs_null_check() &&
2135       (needs_patching ||
2136        MacroAssembler::needs_explicit_null_check(x->offset()) ||
2137        stress_deopt)) {
2138     LIR_Opr obj = object.result();
2139     if (stress_deopt) {
2140       obj = new_register(T_OBJECT);
2141       __ move(LIR_OprFact::oopConst(NULL), obj);
2142     }
2143     // Emit an explicit null check because the offset is too large.
2144     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
2145     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
2146     __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
2147   }
2148 
2149   DecoratorSet decorators = IN_HEAP;
2150   if (is_volatile) {
2151     decorators |= MO_SEQ_CST;
2152   }
2153   if (needs_patching) {
2154     decorators |= C1_NEEDS_PATCHING;
2155   }
2156 
2157   LIR_Opr result = rlock_result(x, field_type);
2158   access_load_at(decorators, field_type,
2159                  object, LIR_OprFact::intConst(x->offset()), result,
2160                  info ? new CodeEmitInfo(info) : NULL, info);
2161 
2162   ciField* field = x->field();
2163   if (field->is_null_free()) {
2164     // A load from a non-flattened inline type field requires
2165     // a null check that replaces null with the default value.
2166     ciInstanceKlass* holder = field->holder();
2167     if (field->is_static() && holder->is_loaded()) {
2168       ciObject* val = holder->java_mirror()->field_value(field).as_object();
2169       if (!val->is_null_object()) {
2170         // The static field is initialized; no null check is needed.
2171         return;
2172       }
2173     }
2174     ciInlineKlass* inline_klass = field->type()->as_inline_klass();
2175     if (inline_klass->is_initialized()) {
2176       LabelObj* L_end = new LabelObj();
2177       __ cmp(lir_cond_notEqual, result, LIR_OprFact::oopConst(NULL));
2178       __ branch(lir_cond_notEqual, L_end->label());
2179       set_in_conditional_code(true);
2180       Constant* default_value = new Constant(new InstanceConstant(inline_klass->default_instance()));
2181       if (default_value->is_pinned()) {
2182         __ move(LIR_OprFact::value_type(default_value->type()), result);
2183       } else {
2184         __ move(load_constant(default_value), result);
2185       }
2186       __ branch_destination(L_end->label());
2187       set_in_conditional_code(false);
2188     } else {
2189       __ cmp(lir_cond_equal, result, LIR_OprFact::oopConst(NULL));
2190       __ branch(lir_cond_equal, new DeoptimizeStub(info, Deoptimization::Reason_uninitialized,
2191                                                          Deoptimization::Action_make_not_entrant));
2192     }
2193   }
2194 }
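
// Schematic LIR for the null-to-default substitution above (initialized case):
//
//   cmp    notEqual, result, null
//   branch notEqual -> L_end
//   move   <default instance of the inline klass> -> result
//   L_end: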
2195 
2196 // int/long jdk.internal.util.Preconditions.checkIndex
2197 void LIRGenerator::do_PreconditionsCheckIndex(Intrinsic* x, BasicType type) {
2198   assert(x->number_of_arguments() == 3, "wrong type");
2199   LIRItem index(x->argument_at(0), this);
2200   LIRItem length(x->argument_at(1), this);
2201   LIRItem oobef(x->argument_at(2), this);
2202 
2203   index.load_item();
2204   length.load_item();
2205   oobef.load_item();
2206 
2207   LIR_Opr result = rlock_result(x);
2208   // x->state() is created from copy_state_for_exception and does not contain the arguments;
2209   // we must push them here so they are available to the interpreter if we deoptimize.
2210   ValueStack* state = x->state();
2211   for (int i = 0; i < x->number_of_arguments(); i++) {
2212     Value arg = x->argument_at(i);
2213     state->push(arg->type(), arg);

2318       __ move(LIR_OprFact::oopConst(NULL), obj);
2319       __ null_check(obj, new CodeEmitInfo(null_check_info));
2320     }
2321   }
2322 
2323   if (GenerateRangeChecks && needs_range_check) {
2324     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
2325       __ branch(lir_cond_always, new RangeCheckStub(range_check_info, index.result(), array.result()));
2326     } else if (use_length) {
2327       // TODO: use a (modified) version of array_range_check that does not require a
2328       //       constant length to be loaded into a register
2329       __ cmp(lir_cond_belowEqual, length.result(), index.result());
2330       __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
2331     } else {
2332       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
2333       // The range check performs the null check, so clear it out for the load
2334       null_check_info = NULL;
2335     }
2336   }
2337 
2338   ciMethodData* md = NULL;
2339   ciArrayLoadStoreData* load_store = NULL;
2340   if (x->should_profile()) {
2341     if (x->array()->is_loaded_flattened_array()) {
2342       // No need to profile a load from a flattened array of known type. This can happen if
2343       // the type only became known after optimizations (for example, after the PhiSimplifier).
2344       x->set_should_profile(false);
2345     } else {
2346       profile_array_type(x, md, load_store);
2347     }
2348   }
2349 
2350   Value element;
2351   if (x->vt() != NULL) {
2352     assert(x->array()->is_loaded_flattened_array(), "must be");
2353     // Find the destination address (of the NewInlineTypeInstance).
2354     LIRItem obj_item(x->vt(), this);
2355 
2356     access_flattened_array(true, array, index, obj_item,
2357                            x->delayed() == NULL ? 0 : x->delayed()->field(),
2358                            x->delayed() == NULL ? 0 : x->delayed()->offset());
2359     set_no_result(x);
2360   } else if (x->delayed() != NULL) {
2361     assert(x->array()->is_loaded_flattened_array(), "must be");
2362     LIR_Opr result = rlock_result(x, x->delayed()->field()->type()->basic_type());
2363     access_sub_element(array, index, result, x->delayed()->field(), x->delayed()->offset());
2364   } else if (x->array() != NULL && x->array()->is_loaded_flattened_array() &&
2365              x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass()->is_initialized() &&
2366              x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass()->is_empty()) {
2367     // Load the default instance instead of reading the element
2368     ciInlineKlass* elem_klass = x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass();
2369     LIR_Opr result = rlock_result(x, x->elt_type());
2370     assert(elem_klass->is_initialized(), "Must be");
2371     Constant* default_value = new Constant(new InstanceConstant(elem_klass->default_instance()));
2372     if (default_value->is_pinned()) {
2373       __ move(LIR_OprFact::value_type(default_value->type()), result);
2374     } else {
2375       __ move(load_constant(default_value), result);
2376     }
2377   } else {
2378     LIR_Opr result = rlock_result(x, x->elt_type());
2379     LoadFlattenedArrayStub* slow_path = NULL;
2380 
2381     if (x->should_profile() && x->array()->maybe_null_free_array()) {
2382       profile_null_free_array(array, md, load_store);
2383     }
2384 
2385     if (x->elt_type() == T_OBJECT && x->array()->maybe_flattened_array()) {
2386       assert(x->delayed() == NULL, "Delayed LoadIndexed only applies to loaded flattened arrays");
2387       index.load_item();
2388       // If we are loading from a flattened array, do the load via a runtime call
2389       slow_path = new LoadFlattenedArrayStub(array.result(), index.result(), result, state_for(x, x->state_before()));
2390       check_flattened_array(array.result(), LIR_OprFact::illegalOpr, slow_path);
2391       set_in_conditional_code(true);
2392     }
2393 
2394     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
2395     access_load_at(decorators, x->elt_type(),
2396                    array, index.result(), result,
2397                    NULL, null_check_info);
2398 
2399     if (slow_path != NULL) {
2400       __ branch_destination(slow_path->continuation());
2401       set_in_conditional_code(false);
2402     }
2403 
2404     element = x;
2405   }
2406 
2407   if (x->should_profile()) {
2408     profile_element_type(element, md, load_store);
2409   }

2410 }
2411 
2412 void LIRGenerator::do_Deoptimize(Deoptimize* x) {
2413   // This happens only when a class X uses the withfield/aconst_init bytecodes
2414   // to refer to an inline class V, where V has not yet been loaded/resolved.
2415   // This is not a common case. Let's just deoptimize.
2416   CodeEmitInfo* info = state_for(x, x->state_before());
2417   CodeStub* stub = new DeoptimizeStub(new CodeEmitInfo(info),
2418                                       Deoptimization::Reason_unloaded,
2419                                       Deoptimization::Action_make_not_entrant);
2420   __ jump(stub);
2421   LIR_Opr reg = rlock_result(x, T_OBJECT);
2422   __ move(LIR_OprFact::oopConst(NULL), reg);
2423 }
2424 
2425 void LIRGenerator::do_NullCheck(NullCheck* x) {
2426   if (x->can_trap()) {
2427     LIRItem value(x->obj(), this);
2428     value.load_item();
2429     CodeEmitInfo* info = state_for(x);
2430     __ null_check(value.result(), info);
2431   }
2432 }
2433 
2434 
2435 void LIRGenerator::do_TypeCast(TypeCast* x) {
2436   LIRItem value(x->obj(), this);
2437   value.load_item();
2438   // the result is the same as that of the node we are casting
2439   set_result(x, value.result());
2440 }
2441 
2442 
2443 void LIRGenerator::do_Throw(Throw* x) {

2902   Compilation* comp = Compilation::current();
2903   if (do_update) {
2904     // try to find exact type, using CHA if possible, so that loading
2905     // the klass from the object can be avoided
2906     ciType* type = obj->exact_type();
2907     if (type == NULL) {
2908       type = obj->declared_type();
2909       type = comp->cha_exact_type(type);
2910     }
2911     assert(type == NULL || type->is_klass(), "type should be class");
2912     exact_klass = (type != NULL && type->is_loaded()) ? (ciKlass*)type : NULL;
2913 
2914     do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2915   }
2916 
2917   if (!do_null && !do_update) {
2918     return result;
2919   }
2920 
2921   ciKlass* exact_signature_k = NULL;
2922   if (do_update && signature_at_call_k != NULL) {
2923     // Is the type from the signature exact (the only one possible)?
2924     exact_signature_k = signature_at_call_k->exact_klass();
2925     if (exact_signature_k == NULL) {
2926       exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2927     } else {
2928       result = exact_signature_k;
2929       // Known statically. No need to emit any code: prevent
2930       // LIR_Assembler::emit_profile_type() from emitting useless code
2931       profiled_k = ciTypeEntries::with_status(result, profiled_k);
2932     }
2933     // exact_klass and exact_signature_k can both be non-NULL but
2934     // differ if exact_klass is loaded after the ciObject for
2935     // exact_signature_k is created.
2936     if (exact_klass == NULL && exact_signature_k != NULL && exact_klass != exact_signature_k) {
2937       // sometimes the type of the signature is better than the best type
2938       // the compiler has
2939       exact_klass = exact_signature_k;
2940     }
2941     if (callee_signature_k != NULL &&
2942         callee_signature_k != signature_at_call_k) {

2987         assert(!src->is_illegal(), "check");
2988         BasicType t = src->type();
2989         if (is_reference_type(t)) {
2990           intptr_t profiled_k = parameters->type(j);
2991           Local* local = x->state()->local_at(java_index)->as_Local();
2992           ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
2993                                         in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
2994                                         profiled_k, local, mdp, false, local->declared_type()->as_klass(), NULL);
2995           // If the profile is known statically, set it once and for all and do not emit any code
2996           if (exact != NULL) {
2997             md->set_parameter_type(j, exact);
2998           }
2999           j++;
3000         }
3001         java_index += type2size[t];
3002       }
3003     }
3004   }
3005 }
3006 
3007 void LIRGenerator::profile_flags(ciMethodData* md, ciProfileData* data, int flag, LIR_Condition condition) {
3008   assert(md != NULL && data != NULL, "should have been initialized");
3009   LIR_Opr mdp = new_register(T_METADATA);
3010   __ metadata2reg(md->constant_encoding(), mdp);
3011   LIR_Address* addr = new LIR_Address(mdp, md->byte_offset_of_slot(data, DataLayout::flags_offset()), T_BYTE);
3012   LIR_Opr flags = new_register(T_INT);
3013   __ move(addr, flags);
3014   if (condition != lir_cond_always) {
3015     LIR_Opr update = new_register(T_INT);
3016     __ cmove(condition, LIR_OprFact::intConst(0), LIR_OprFact::intConst(flag), update, T_INT);
         __ logical_or(flags, update, flags);   // merge the conditionally selected bit into flags
3017   } else {
3018     __ logical_or(flags, LIR_OprFact::intConst(flag), flags);
3019   }
3020   __ store(flags, addr);
3021 }
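
// Example use (see profile_null_free_array below): record the null-free bit
// depending on the outcome of a preceding runtime array check:
//
//   profile_flags(md, load_store,
//                 ArrayLoadStoreData::null_free_array_byte_constant(),
//                 lir_cond_equal);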
3022 
3023 void LIRGenerator::profile_null_free_array(LIRItem array, ciMethodData* md, ciArrayLoadStoreData* load_store) {
3024   assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
3026   LIR_Opr tmp = new_register(T_METADATA);
3027   __ check_null_free_array(array.result(), tmp);
3028 
3029   profile_flags(md, load_store, ArrayLoadStoreData::null_free_array_byte_constant(), lir_cond_equal);
3030 }
3031 
3032 void LIRGenerator::profile_array_type(AccessIndexed* x, ciMethodData*& md, ciArrayLoadStoreData*& load_store) {
3033   assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
3034   int bci = x->profiled_bci();
3035   md = x->profiled_method()->method_data();
3036   assert(md != NULL, "Sanity");
3037   ciProfileData* data = md->bci_to_data(bci);
3038   assert(data != NULL && data->is_ArrayLoadStoreData(), "incorrect profiling entry");
3039   load_store = (ciArrayLoadStoreData*)data;
3040   LIR_Opr mdp = LIR_OprFact::illegalOpr;
3041   profile_type(md, md->byte_offset_of_slot(load_store, ArrayLoadStoreData::array_offset()), 0,
3042                load_store->array()->type(), x->array(), mdp, true, NULL, NULL);
3043 }
3044 
3045 void LIRGenerator::profile_element_type(Value element, ciMethodData* md, ciArrayLoadStoreData* load_store) {
3046   assert(compilation()->profile_array_accesses(), "array access profiling is disabled");
3047   assert(md != NULL && load_store != NULL, "should have been initialized");
3048   LIR_Opr mdp = LIR_OprFact::illegalOpr;
3049   profile_type(md, md->byte_offset_of_slot(load_store, ArrayLoadStoreData::element_offset()), 0,
3050                load_store->element()->type(), element, mdp, false, NULL, NULL);
3051 }
3052 
3053 void LIRGenerator::do_Base(Base* x) {
3054   __ std_entry(LIR_OprFact::illegalOpr);
3055   // Emit moves from physical registers / stack slots to virtual registers
3056   CallingConvention* args = compilation()->frame_map()->incoming_arguments();
3057   IRScope* irScope = compilation()->hir()->top_scope();
3058   int java_index = 0;
3059   for (int i = 0; i < args->length(); i++) {
3060     LIR_Opr src = args->at(i);
3061     assert(!src->is_illegal(), "check");
3062     BasicType t = src->type();
3063 
3064     // Types that are smaller than int are passed as int, so
3065     // correct the type that was passed.
3066     switch (t) {
3067     case T_BYTE:
3068     case T_BOOLEAN:
3069     case T_SHORT:
3070     case T_CHAR:
3071       t = T_INT;
3072       break;

3113     }
3114     assert(obj->is_valid(), "must be valid");
3115 
3116     if (method()->is_synchronized() && GenerateSynchronizationCode) {
3117       LIR_Opr lock = syncLockOpr();
3118       __ load_stack_address_monitor(0, lock);
3119 
3120       CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
3121       CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
3122 
3123       // receiver is guaranteed non-NULL, so we don't need a CodeEmitInfo
3124       __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
3125     }
3126   }
3127   // increment invocation counters if needed
3128   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
3129     profile_parameters(x);
3130     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
3131     increment_invocation_counter(info);
3132   }
3133   if (method()->has_scalarized_args()) {
3134     // Check if deoptimization was triggered (i.e. orig_pc was set) while buffering scalarized inline type arguments
3135     // in the entry point (see comments in frame::deoptimize). If so, deoptimize only now that we have the right state.
3136     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), NULL, false);
3137     CodeStub* deopt_stub = new DeoptimizeStub(info, Deoptimization::Reason_none, Deoptimization::Action_none);
3138     __ append(new LIR_Op0(lir_check_orig_pc));
3139     __ branch(lir_cond_notEqual, deopt_stub);
3140   }
3141 
3142   // all blocks with a successor must end with an unconditional jump
3143   // to the successor even if they are consecutive
3144   __ jump(x->default_sux());
3145 }
3146 
3147 
3148 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
3149   // Construct our frame and model the production of the incoming pointer
3150   // to the OSR buffer.
3151   __ osr_entry(LIR_Assembler::osrBufferPointer());
3152   LIR_Opr result = rlock_result(x);
3153   __ move(LIR_Assembler::osrBufferPointer(), result);
3154 }
3155 
3156 void LIRGenerator::invoke_load_one_argument(LIRItem* param, LIR_Opr loc) {
3157   if (loc->is_register()) {
3158     param->load_item_force(loc);
3159   } else {
3160     LIR_Address* addr = loc->as_address_ptr();
3161     param->load_for_store(addr->type());
3162     assert(addr->type() != T_PRIMITIVE_OBJECT, "not supported yet");
3163     if (addr->type() == T_OBJECT) {
3164       __ move_wide(param->result(), addr);
3165     } else {
3166       __ move(param->result(), addr);
3167     }
3168   }
3169 }
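
// invoke_load_arguments() below drives this helper once per outgoing argument:
// register locations are bound with load_item_force(), while stack locations
// go through a plain move, or move_wide for oops.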
3170 
3171 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
3172   assert(args->length() == arg_list->length(),
3173          "args=%d, arg_list=%d", args->length(), arg_list->length());
3174   for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
3175     LIRItem* param = args->at(i);
3176     LIR_Opr loc = arg_list->at(i);
3177     invoke_load_one_argument(param, loc);









3178   }
3179 
3180   if (x->has_receiver()) {
3181     LIRItem* receiver = args->at(0);
3182     LIR_Opr loc = arg_list->at(0);
3183     if (loc->is_register()) {
3184       receiver->load_item_force(loc);
3185     } else {
3186       assert(loc->is_address(), "just checking");
3187       receiver->load_for_store(T_OBJECT);
3188       __ move_wide(receiver->result(), loc->as_address_ptr());
3189     }
3190   }
3191 }
3192 
3193 
3194 // Visits all arguments, returns appropriate items without loading them
3195 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
3196   LIRItemList* argument_items = new LIRItemList();
3197   if (x->has_receiver()) {

3323   __ move(tmp, reg);
3324 }
3325 
3326 
3327 
3328 // Code for:  x->x() {x->cond()} x->y() ? x->tval() : x->fval()
3329 void LIRGenerator::do_IfOp(IfOp* x) {
3330 #ifdef ASSERT
3331   {
3332     ValueTag xtag = x->x()->type()->tag();
3333     ValueTag ttag = x->tval()->type()->tag();
3334     assert(xtag == intTag || xtag == objectTag, "cannot handle others");
3335     assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
3336     assert(ttag == x->fval()->type()->tag(), "cannot handle others");
3337   }
3338 #endif
3339 
3340   LIRItem left(x->x(), this);
3341   LIRItem right(x->y(), this);
3342   left.load_item();
3343   if (can_inline_as_constant(right.value()) && !x->substitutability_check()) {
3344     right.dont_load_item();
3345   } else {
3346     // substitutability_check() needs to use right as a base register.
3347     right.load_item();
3348   }
3349 
3350   LIRItem t_val(x->tval(), this);
3351   LIRItem f_val(x->fval(), this);
3352   t_val.dont_load_item();
3353   f_val.dont_load_item();

3354 
3355   if (x->substitutability_check()) {
3356     substitutability_check(x, left, right, t_val, f_val);
3357   } else {
3358     LIR_Opr reg = rlock_result(x);
3359     __ cmp(lir_cond(x->cond()), left.result(), right.result());
3360     __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
3361   }
3362 }
3363 
3364 void LIRGenerator::substitutability_check(IfOp* x, LIRItem& left, LIRItem& right, LIRItem& t_val, LIRItem& f_val) {
3365   assert(x->cond() == If::eql || x->cond() == If::neq, "must be");
3366   bool is_acmpeq = (x->cond() == If::eql);
3367   LIR_Opr equal_result     = is_acmpeq ? t_val.result() : f_val.result();
3368   LIR_Opr not_equal_result = is_acmpeq ? f_val.result() : t_val.result();
3369   LIR_Opr result = rlock_result(x);
3370   CodeEmitInfo* info = state_for(x, x->state_before());
3371 
3372   substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info);
3373 }
3374 
3375 void LIRGenerator::substitutability_check(If* x, LIRItem& left, LIRItem& right) {
3376   LIR_Opr equal_result     = LIR_OprFact::intConst(1);
3377   LIR_Opr not_equal_result = LIR_OprFact::intConst(0);
3378   LIR_Opr result = new_register(T_INT);
3379   CodeEmitInfo* info = state_for(x, x->state_before());
3380 
3381   substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info);
3382 
3383   assert(x->cond() == If::eql || x->cond() == If::neq, "must be");
3384   __ cmp(lir_cond(x->cond()), result, equal_result);
3385 }
3386 
3387 void LIRGenerator::substitutability_check_common(Value left_val, Value right_val, LIRItem& left, LIRItem& right,
3388                                                  LIR_Opr equal_result, LIR_Opr not_equal_result, LIR_Opr result,
3389                                                  CodeEmitInfo* info) {
3390   LIR_Opr tmp1 = LIR_OprFact::illegalOpr;
3391   LIR_Opr tmp2 = LIR_OprFact::illegalOpr;
3392   LIR_Opr left_klass_op = LIR_OprFact::illegalOpr;
3393   LIR_Opr right_klass_op = LIR_OprFact::illegalOpr;
3394 
3395   ciKlass* left_klass  = left_val ->as_loaded_klass_or_null();
3396   ciKlass* right_klass = right_val->as_loaded_klass_or_null();
3397 
3398   if ((left_klass == NULL || right_klass == NULL) || // The klass is still unloaded, or came from a Phi node.
3399       !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
3400     init_temps_for_substitutability_check(tmp1, tmp2);
3401   }
3402 
3403   if (left_klass != NULL && left_klass->is_inlinetype() && left_klass == right_klass) {
3404     // No need to load klass -- the operands are statically known to be the same inline klass.
3405   } else {
3406     BasicType t_klass = UseCompressedOops ? T_INT : T_METADATA;
3407     left_klass_op = new_register(t_klass);
3408     right_klass_op = new_register(t_klass);
3409   }
3410 
3411   CodeStub* slow_path = new SubstitutabilityCheckStub(left.result(), right.result(), info);
3412   __ substitutability_check(result, left.result(), right.result(), equal_result, not_equal_result,
3413                             tmp1, tmp2,
3414                             left_klass, right_klass, left_klass_op, right_klass_op, info, slow_path);
3415 }
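
// The emitted fast path compares the operands directly and, unless both are
// statically known to be the same inline klass, also compares their klasses;
// any inconclusive case branches to the SubstitutabilityCheckStub, which
// resolves substitutability via a runtime call.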
3416 
3417 void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {
3418   assert(x->number_of_arguments() == 0, "wrong type");
3419   // Enforce computation of _reserved_argument_area_size which is required on some platforms.
3420   BasicTypeList signature;
3421   CallingConvention* cc = frame_map()->c_calling_convention(&signature);
3422   LIR_Opr reg = result_register_for(x->type());
3423   __ call_runtime_leaf(routine, getThreadTemp(),
3424                        reg, new LIR_OprList());
3425   LIR_Opr result = rlock_result(x);
3426   __ move(reg, result);
3427 }
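
// A hypothetical zero-argument leaf intrinsic would be wired up from
// do_Intrinsic() below along these lines (id and entry point are made up
// for illustration):
//
//   case vmIntrinsics::_someLeafIntrinsic:
//     do_RuntimeCall(CAST_FROM_FN_PTR(address, SomeRuntime::entry), x);
//     break;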
3428 
3429 
3430 
3431 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
3432   switch (x->id()) {
3433   case vmIntrinsics::_intBitsToFloat      :
3434   case vmIntrinsics::_doubleToRawLongBits :

3664   if (x->recv() != NULL || x->nb_profiled_args() > 0) {
3665     profile_parameters_at_call(x);
3666   }
3667 
3668   if (x->recv() != NULL) {
3669     LIRItem value(x->recv(), this);
3670     value.load_item();
3671     recv = new_register(T_OBJECT);
3672     __ move(value.result(), recv);
3673   }
3674   __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3675 }
3676 
3677 void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
3678   int bci = x->bci_of_invoke();
3679   ciMethodData* md = x->method()->method_data_or_null();
3680   assert(md != NULL, "Sanity");
3681   ciProfileData* data = md->bci_to_data(bci);
3682   if (data != NULL) {
3683     assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
3684     ciSingleTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
3685     LIR_Opr mdp = LIR_OprFact::illegalOpr;
3686 
3687     bool ignored_will_link;
3688     ciSignature* signature_at_call = NULL;
3689     x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3690 
3691     // The offset within the MDO of the entry to update may be too large
3692     // to be used in load/store instructions on some platforms. So have
3693     // profile_type() compute the address of the profile in a register.
3694     ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
3695         ret->type(), x->ret(), mdp,
3696         !x->needs_null_check(),
3697         signature_at_call->return_type()->as_klass(),
3698         x->callee()->signature()->return_type()->as_klass());
3699     if (exact != NULL) {
3700       md->set_return_type(bci, exact);
3701     }
3702   }
3703 }
3704 
3705 bool LIRGenerator::profile_inline_klass(ciMethodData* md, ciProfileData* data, Value value, int flag) {
3706   ciKlass* klass = value->as_loaded_klass_or_null();
3707   if (klass != NULL) {
3708     if (klass->is_inlinetype()) {
3709       profile_flags(md, data, flag, lir_cond_always);
3710     } else if (klass->can_be_inline_klass()) {
3711       return false;
3712     }
3713   } else {
3714     return false;
3715   }
3716   return true;
3717 }
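
// Result summary:
//   klass statically known to be an inline klass -> flag set eagerly, true
//   klass statically known to never be inline    -> nothing to record, true
//   klass unloaded or only possibly inline       -> false; the caller must
//                                                   emit a runtime profile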
3718 
3719 
3720 void LIRGenerator::do_ProfileACmpTypes(ProfileACmpTypes* x) {
3721   ciMethod* method = x->method();
3722   assert(method != NULL, "method should be set if branch is profiled");
3723   ciMethodData* md = method->method_data_or_null();
3724   assert(md != NULL, "Sanity");
3725   ciProfileData* data = md->bci_to_data(x->bci());
3726   assert(data != NULL, "must have profiling data");
3727   assert(data->is_ACmpData(), "need ACmpData to profile acmp");
3728   ciACmpData* acmp = (ciACmpData*)data;
3729   LIR_Opr mdp = LIR_OprFact::illegalOpr;
3730   profile_type(md, md->byte_offset_of_slot(acmp, ACmpData::left_offset()), 0,
3731                acmp->left()->type(), x->left(), mdp, !x->left_maybe_null(), NULL, NULL);
3732   int flags_offset = md->byte_offset_of_slot(data, DataLayout::flags_offset());
3733   if (!profile_inline_klass(md, acmp, x->left(), ACmpData::left_inline_type_byte_constant())) {
3734     LIR_Opr mdp = new_register(T_METADATA);
3735     __ metadata2reg(md->constant_encoding(), mdp);
3736     LIRItem value(x->left(), this);
3737     value.load_item();
3738     __ profile_inline_type(new LIR_Address(mdp, flags_offset, T_INT), value.result(), ACmpData::left_inline_type_byte_constant(), new_register(T_INT), !x->left_maybe_null());
3739   }
3740   profile_type(md, md->byte_offset_of_slot(acmp, ACmpData::left_offset()),
3741                in_bytes(ACmpData::right_offset()) - in_bytes(ACmpData::left_offset()),
3742                acmp->right()->type(), x->right(), mdp, !x->right_maybe_null(), NULL, NULL);
3743   if (!profile_inline_klass(md, acmp, x->right(), ACmpData::right_inline_type_byte_constant())) {
3744     LIR_Opr mdp = new_register(T_METADATA);
3745     __ metadata2reg(md->constant_encoding(), mdp);
3746     LIRItem value(x->right(), this);
3747     value.load_item();
3748       __ profile_inline_type(new LIR_Address(mdp, flags_offset, T_INT), value.result(), ACmpData::right_inline_type_byte_constant(), new_register(T_INT), !x->right_maybe_null());
3749   }
3750 }
3751 
3752 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
3753   // We can safely ignore accessors here, since C2 will inline them anyway;
3754   // accessors are also always mature.
3755   if (!x->inlinee()->is_accessor()) {
3756     CodeEmitInfo* info = state_for(x, x->state(), true);
3757     // Notify the runtime only very infrequently, just to take care of counter overflows
3758     int freq_log = Tier23InlineeNotifyFreqLog;
3759     double scale;
3760     if (_method->has_option_value(CompileCommand::CompileThresholdScaling, scale)) {
3761       freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
3762     }
3763     increment_event_counter_impl(info, x->inlinee(), LIR_OprFact::intConst(InvocationCounter::count_increment), right_n_bits(freq_log), InvocationEntryBci, false, true);
3764   }
3765 }
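
// Note: right_n_bits(freq_log) turns the (possibly scaled) log2 notification
// frequency into the mask tested by increment_event_counter_impl().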
3766 
3767 void LIRGenerator::increment_backedge_counter_conditionally(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info, int left_bci, int right_bci, int bci) {
3768   if (compilation()->is_profiling()) {
3769 #if defined(X86) && !defined(_LP64)
3770     // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
3771     LIR_Opr left_copy = new_register(left->type());
< prev index next >