src/hotspot/share/c1/c1_LIRGenerator.cpp

  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Compilation.hpp"
  27 #include "c1/c1_Defs.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "ci/ciObjArray.hpp"
  36 #include "ci/ciUtilities.hpp"
  37 #include "gc/shared/barrierSet.hpp"
  38 #include "gc/shared/c1/barrierSetC1.hpp"
  39 #include "runtime/arguments.hpp"
  40 #include "runtime/sharedRuntime.hpp"
  41 #include "runtime/stubRoutines.hpp"
  42 #include "runtime/vm_version.hpp"
  43 #include "utilities/bitMap.inline.hpp"
  44 #include "utilities/macros.hpp"
  45 
  46 #ifdef ASSERT
  47 #define __ gen()->lir(__FILE__, __LINE__)->
  48 #else
  49 #define __ gen()->lir()->
  50 #endif
  51 
  52 #ifndef PATCHED_ADDR
  53 #define PATCHED_ADDR  (max_jint)
  54 #endif
  55 
  56 void PhiResolverState::reset() {


 192   ResolveNode* source = source_node(src);
 193   source->append(destination_node(dest));
 194 }
 195 
 196 
 197 //--------------------------------------------------------------
 198 // LIRItem
 199 
 200 void LIRItem::set_result(LIR_Opr opr) {
 201   assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
 202   value()->set_operand(opr);
 203 
 204   if (opr->is_virtual()) {
 205     _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
 206   }
 207 
 208   _result = opr;
 209 }
 210 
 211 void LIRItem::load_item() {
 212   if (result()->is_illegal()) {
 213     // update the item's result
 214     _result = value()->operand();
 215   }
 216   if (!result()->is_register()) {
 217     LIR_Opr reg = _gen->new_register(value()->type());
 218     __ move(result(), reg);
 219     if (result()->is_constant()) {
 220       _result = reg;
 221     } else {
 222       set_result(reg);
 223     }
 224   }
 225 }
 226 
 227 
 228 void LIRItem::load_for_store(BasicType type) {
 229   if (_gen->can_store_as_constant(value(), type)) {
 230     _result = value()->operand();
 231     if (!_result->is_constant()) {


 619     assert(right_op != result_op, "malformed");
 620     __ move(left_op, result_op);
 621     left_op = result_op;
 622   }
 623 
 624   switch(code) {
 625     case Bytecodes::_iand:
 626     case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;
 627 
 628     case Bytecodes::_ior:
 629     case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;
 630 
 631     case Bytecodes::_ixor:
 632     case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;
 633 
 634     default: ShouldNotReachHere();
 635   }
 636 }
 637 
 638 
 639 void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
 640   if (!GenerateSynchronizationCode) return;
 641   // for slow path, use debug info for state after successful locking
 642   CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
 643   __ load_stack_address_monitor(monitor_no, lock);
 644   // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
 645   __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
 646 }
 647 
 648 
 649 void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
 650   if (!GenerateSynchronizationCode) return;
 651   // setup registers
 652   LIR_Opr hdr = lock;
 653   lock = new_hdr;
 654   CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
 655   __ load_stack_address_monitor(monitor_no, lock);
 656   __ unlock_object(hdr, object, lock, scratch, slow_path);
 657 }
 658 
 659 #ifndef PRODUCT
 660 void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
 661   if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
 662     tty->print_cr("   ###class not loaded at new bci %d", new_instance->printable_bci());
 663   } else if (PrintNotLoaded && (TieredCompilation && new_instance->is_unresolved())) {
 664     tty->print_cr("   ###class not resolved at new bci %d", new_instance->printable_bci());
 665   }


 767       if (src_type != NULL) {
 768         if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
 769           is_exact = true;
 770           expected_type = dst_type;
 771         }
 772       }
 773     }
 774     // at least pass along a good guess
 775     if (expected_type == NULL) expected_type = dst_exact_type;
 776     if (expected_type == NULL) expected_type = src_declared_type;
 777     if (expected_type == NULL) expected_type = dst_declared_type;
 778 
 779     src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
 780     dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
 781   }
 782 
 783   // if a probable array type has been identified, figure out if any
 784   // of the required checks for a fast case can be elided.
 785   int flags = LIR_OpArrayCopy::all_flags;
 786 
 787   if (!src_objarray)
 788     flags &= ~LIR_OpArrayCopy::src_objarray;
 789   if (!dst_objarray)
 790     flags &= ~LIR_OpArrayCopy::dst_objarray;
 791 
 792   if (!x->arg_needs_null_check(0))
 793     flags &= ~LIR_OpArrayCopy::src_null_check;
 794   if (!x->arg_needs_null_check(2))
 795     flags &= ~LIR_OpArrayCopy::dst_null_check;
 796 
 797 
 798   if (expected_type != NULL) {
 799     Value length_limit = NULL;
 800 
 801     IfOp* ifop = length->as_IfOp();
 802     if (ifop != NULL) {
 803       // look for expressions like min(v, a.length) which end up as
 804       //   x > y ? y : x  or  x >= y ? y : x
 805       if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
 806           ifop->x() == ifop->fval() &&


1404       case T_FLOAT:
1405         if (c->as_jint_bits() != other->as_jint_bits()) continue;
1406         break;
1407       case T_LONG:
1408       case T_DOUBLE:
1409         if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
1410         if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
1411         break;
1412       case T_OBJECT:
1413         if (c->as_jobject() != other->as_jobject()) continue;
1414         break;
1415       default:
1416         break;
1417       }
1418       return _reg_for_constants.at(i);
1419     }
1420   }
1421 
1422   LIR_Opr result = new_register(t);
1423   __ move((LIR_Opr)c, result);
1424   _constants.append(c);
1425   _reg_for_constants.append(result);
1426   return result;
1427 }
1428 
1429 //------------------------field access--------------------------------------
1430 
1431 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
1432   assert(x->number_of_arguments() == 4, "wrong type");
1433   LIRItem obj   (x->argument_at(0), this);  // object
1434   LIRItem offset(x->argument_at(1), this);  // offset of field
1435   LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
1436   LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
1437   assert(obj.type()->tag() == objectTag, "invalid type");
1438 
1439   // In 64-bit the type can be long; sparc doesn't have this assert
1440   // assert(offset.type()->tag() == intTag, "invalid type");
1441 
1442   assert(cmp.type()->tag() == type->tag(), "invalid type");
1443   assert(val.type()->tag() == type->tag(), "invalid type");
1444 
1445   LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
1446                                             obj, offset, cmp, val);
1447   set_result(x, result);
1448 }


1507       value.load_byte_item();
1508     } else  {
1509       value.load_item();
1510     }
1511   } else {
1512     value.load_for_store(field_type);
1513   }
1514 
1515   set_no_result(x);
1516 
1517 #ifndef PRODUCT
1518   if (PrintNotLoaded && needs_patching) {
1519     tty->print_cr("   ###class not loaded at store_%s bci %d",
1520                   x->is_static() ?  "static" : "field", x->printable_bci());
1521   }
1522 #endif
1523 
1524   if (x->needs_null_check() &&
1525       (needs_patching ||
1526        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1527     // Emit an explicit null check because the offset is too large.
1528     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1529     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1530     __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1531   }
1532 
1533   DecoratorSet decorators = IN_HEAP;
1534   if (is_volatile) {
1535     decorators |= MO_SEQ_CST;
1536   }
1537   if (needs_patching) {
1538     decorators |= C1_NEEDS_PATCHING;
1539   }
1540 
1541   access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
1542                   value.result(), info != NULL ? new CodeEmitInfo(info) : NULL, info);
1543 }
1544 
1545 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1546   assert(x->is_pinned(),"");
1547   bool needs_range_check = x->compute_needs_range_check();
1548   bool use_length = x->length() != NULL;
1549   bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
1550   bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
1551                                          !get_jobject_constant(x->value())->is_null_object() ||
1552                                          x->should_profile());
1553 
1554   LIRItem array(x->array(), this);
1555   LIRItem index(x->index(), this);
1556   LIRItem value(x->value(), this);
1557   LIRItem length(this);
1558 
1559   array.load_item();
1560   index.load_nonconstant();
1561 
1562   if (use_length && needs_range_check) {
1563     length.set_instruction(x->length());
1564     length.load_item();
1565 
1566   }
1567   if (needs_store_check || x->check_boolean()) {
1568     value.load_item();
1569   } else {
1570     value.load_for_store(x->elt_type());
1571   }
1572 
1573   set_no_result(x);
1574 
1575   // the CodeEmitInfo must be duplicated for each different
1576   // LIR-instruction because spilling can occur anywhere between two
1577   // instructions and so the debug information must be different
1578   CodeEmitInfo* range_check_info = state_for(x);
1579   CodeEmitInfo* null_check_info = NULL;
1580   if (x->needs_null_check()) {
1581     null_check_info = new CodeEmitInfo(range_check_info);
1582   }
1583 
1584   if (GenerateRangeChecks && needs_range_check) {
1585     if (use_length) {
1586       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1587       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
1588     } else {
1589       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1590       // range_check also does the null check
1591       null_check_info = NULL;
1592     }
1593   }
1594 
1595   if (GenerateArrayStoreCheck && needs_store_check) {
1596     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1597     array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1598   }
1599 
1600   DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1601   if (x->check_boolean()) {
1602     decorators |= C1_MASK_BOOLEAN;
1603   }
1604 
1605   access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
1606                   NULL, null_check_info);
1607 }
1608 
1609 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
1610                                   LIRItem& base, LIR_Opr offset, LIR_Opr result,
1611                                   CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
1612   decorators |= ACCESS_READ;
1613   LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
1614   if (access.is_raw()) {
1615     _barrier_set->BarrierSetC1::load_at(access, result);
1616   } else {
1617     _barrier_set->load_at(access, result);
1618   }
1619 }
1620 
1621 void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
1622                                LIR_Opr addr, LIR_Opr result) {
1623   decorators |= ACCESS_READ;
1624   LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
1625   access.set_resolved_addr(addr);
1626   if (access.is_raw()) {


1676   decorators |= ACCESS_WRITE;
1677   // Atomic operations are SEQ_CST by default
1678   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
1679   LIRAccess access(this, decorators, base, offset, type);
1680   if (access.is_raw()) {
1681     return _barrier_set->BarrierSetC1::atomic_add_at(access, value);
1682   } else {
1683     return _barrier_set->atomic_add_at(access, value);
1684   }
1685 }
1686 
1687 LIR_Opr LIRGenerator::access_resolve(DecoratorSet decorators, LIR_Opr obj) {
1688   // Use stronger ACCESS_WRITE|ACCESS_READ by default.
1689   if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
1690     decorators |= ACCESS_READ | ACCESS_WRITE;
1691   }
1692 
1693   return _barrier_set->resolve(this, decorators, obj);
1694 }
1695 
1696 void LIRGenerator::do_LoadField(LoadField* x) {
1697   bool needs_patching = x->needs_patching();
1698   bool is_volatile = x->field()->is_volatile();
1699   BasicType field_type = x->field_type();
1700 
1701   CodeEmitInfo* info = NULL;
1702   if (needs_patching) {
1703     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1704     info = state_for(x, x->state_before());
1705   } else if (x->needs_null_check()) {
1706     NullCheck* nc = x->explicit_null_check();
1707     if (nc == NULL) {
1708       info = state_for(x);
1709     } else {
1710       info = state_for(nc);
1711     }
1712   }
1713 
1714   LIRItem object(x->obj(), this);
1715 
1716   object.load_item();
1717 
1718 #ifndef PRODUCT
1719   if (PrintNotLoaded && needs_patching) {
1720     tty->print_cr("   ###class not loaded at load_%s bci %d",
1721                   x->is_static() ?  "static" : "field", x->printable_bci());
1722   }
1723 #endif
1724 
1725   bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1726   if (x->needs_null_check() &&
1727       (needs_patching ||
1728        MacroAssembler::needs_explicit_null_check(x->offset()) ||
1729        stress_deopt)) {
1730     LIR_Opr obj = object.result();
1731     if (stress_deopt) {
1732       obj = new_register(T_OBJECT);
1733       __ move(LIR_OprFact::oopConst(NULL), obj);
1734     }
1735     // Emit an explicit null check because the offset is too large.
1736     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1737     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1738     __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1739   }
1740 
1741   DecoratorSet decorators = IN_HEAP;
1742   if (is_volatile) {
1743     decorators |= MO_SEQ_CST;
1744   }
1745   if (needs_patching) {
1746     decorators |= C1_NEEDS_PATCHING;
1747   }
1748 
1749   LIR_Opr result = rlock_result(x, field_type);
1750   access_load_at(decorators, field_type,
1751                  object, LIR_OprFact::intConst(x->offset()), result,
1752                  info ? new CodeEmitInfo(info) : NULL, info);
1753 }
1754 
1755 
1756 //------------------------java.nio.Buffer.checkIndex------------------------
1757 
1758 // int java.nio.Buffer.checkIndex(int)
1759 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1760   // NOTE: by the time we are in checkIndex() we are guaranteed that
1761   // the buffer is non-null (because checkIndex is package-private and
1762   // only called from within other methods in the buffer).
1763   assert(x->number_of_arguments() == 2, "wrong type");
1764   LIRItem buf  (x->argument_at(0), this);
1765   LIRItem index(x->argument_at(1), this);
1766   buf.load_item();
1767   index.load_item();
1768 
1769   LIR_Opr result = rlock_result(x);
1770   if (GenerateRangeChecks) {
1771     CodeEmitInfo* info = state_for(x);
1772     CodeStub* stub = new RangeCheckStub(info, index.result());


1847       __ move(LIR_OprFact::oopConst(NULL), obj);
1848       __ null_check(obj, new CodeEmitInfo(null_check_info));
1849     }
1850   }
1851 
1852   if (GenerateRangeChecks && needs_range_check) {
1853     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1854       __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result(), array.result()));
1855     } else if (use_length) {
1856       // TODO: use a (modified) version of array_range_check that does not require a
1857       //       constant length to be loaded to a register
1858       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1859       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
1860     } else {
1861       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1862       // The range check performs the null check, so clear it out for the load
1863       null_check_info = NULL;
1864     }
1865   }
1866 
1867   DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1868 
1869   LIR_Opr result = rlock_result(x, x->elt_type());
1870   access_load_at(decorators, x->elt_type(),
1871                  array, index.result(), result,
1872                  NULL, null_check_info);
1873 }
1874 
1875 
1876 void LIRGenerator::do_NullCheck(NullCheck* x) {
1877   if (x->can_trap()) {
1878     LIRItem value(x->obj(), this);
1879     value.load_item();
1880     CodeEmitInfo* info = state_for(x);
1881     __ null_check(value.result(), info);
1882   }
1883 }
1884 
1885 
1886 void LIRGenerator::do_TypeCast(TypeCast* x) {
1887   LIRItem value(x->obj(), this);
1888   value.load_item();
1889   // the result is the same as from the node we are casting
1890   set_result(x, value.result());
1891 }
1892 
1893 
1894 void LIRGenerator::do_Throw(Throw* x) {


2700   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2701     profile_parameters(x);
2702     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
2703     increment_invocation_counter(info);
2704   }
2705 
2706   // all blocks with a successor must end with an unconditional jump
2707   // to the successor even if they are consecutive
2708   __ jump(x->default_sux());
2709 }
2710 
2711 
2712 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2713   // construct our frame and model the production of incoming pointer
2714   // to the OSR buffer.
2715   __ osr_entry(LIR_Assembler::osrBufferPointer());
2716   LIR_Opr result = rlock_result(x);
2717   __ move(LIR_Assembler::osrBufferPointer(), result);
2718 }
2719 
2720 
2721 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2722   assert(args->length() == arg_list->length(),
2723          "args=%d, arg_list=%d", args->length(), arg_list->length());
2724   for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
2725     LIRItem* param = args->at(i);
2726     LIR_Opr loc = arg_list->at(i);
2727     if (loc->is_register()) {
2728       param->load_item_force(loc);
2729     } else {
2730       LIR_Address* addr = loc->as_address_ptr();
2731       param->load_for_store(addr->type());
2732       if (addr->type() == T_OBJECT) {
2733         __ move_wide(param->result(), addr);
2734       } else
2735         if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2736           __ unaligned_move(param->result(), addr);
2737         } else {
2738           __ move(param->result(), addr);
2739         }
2740     }
2741   }
2742 
2743   if (x->has_receiver()) {
2744     LIRItem* receiver = args->at(0);
2745     LIR_Opr loc = arg_list->at(0);
2746     if (loc->is_register()) {
2747       receiver->load_item_force(loc);
2748     } else {
2749       assert(loc->is_address(), "just checking");
2750       receiver->load_for_store(T_OBJECT);
2751       __ move_wide(receiver->result(), loc->as_address_ptr());
2752     }
2753   }
2754 }
2755 
2756 
2757 // Visits all arguments, returns appropriate items without loading them
2758 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2759   LIRItemList* argument_items = new LIRItemList();
2760   if (x->has_receiver()) {


2901   __ move(tmp, reg);
2902 }
2903 
2904 
2905 
2906 // Code for  :  x->x() {x->cond()} x->y() ? x->tval() : x->fval()
2907 void LIRGenerator::do_IfOp(IfOp* x) {
2908 #ifdef ASSERT
2909   {
2910     ValueTag xtag = x->x()->type()->tag();
2911     ValueTag ttag = x->tval()->type()->tag();
2912     assert(xtag == intTag || xtag == objectTag, "cannot handle others");
2913     assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
2914     assert(ttag == x->fval()->type()->tag(), "cannot handle others");
2915   }
2916 #endif
2917 
2918   LIRItem left(x->x(), this);
2919   LIRItem right(x->y(), this);
2920   left.load_item();
2921   if (can_inline_as_constant(right.value())) {
2922     right.dont_load_item();
2923   } else {
2924     right.load_item();
2925   }
2926 
2927   LIRItem t_val(x->tval(), this);
2928   LIRItem f_val(x->fval(), this);
2929   t_val.dont_load_item();
2930   f_val.dont_load_item();
2931   LIR_Opr reg = rlock_result(x);
2932 
2933   __ cmp(lir_cond(x->cond()), left.result(), right.result());
2934   __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
2935 }
2936 
2937 #ifdef JFR_HAVE_INTRINSICS
2938 void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
2939   CodeEmitInfo* info = state_for(x);
2940   CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
2941 
2942   assert(info != NULL, "must have info");
2943   LIRItem arg(x->argument_at(0), this);
2944 
2945   arg.load_item();
2946   LIR_Opr klass = new_register(T_METADATA);
2947   __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), T_ADDRESS), klass, info);
2948   LIR_Opr id = new_register(T_LONG);
2949   ByteSize offset = KLASS_TRACE_ID_OFFSET;
2950   LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
2951 
2952   __ move(trace_id_addr, id);
2953   __ logical_or(id, LIR_OprFact::longConst(0x01l), id);
2954   __ store(id, trace_id_addr);




  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Compilation.hpp"
  27 #include "c1/c1_Defs.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "ci/ciObjArray.hpp"
  36 #include "ci/ciUtilities.hpp"
  37 #include "ci/ciValueArrayKlass.hpp"
  38 #include "ci/ciValueKlass.hpp"
  39 #include "gc/shared/barrierSet.hpp"
  40 #include "gc/shared/c1/barrierSetC1.hpp"
  41 #include "runtime/arguments.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "runtime/stubRoutines.hpp"
  44 #include "runtime/vm_version.hpp"
  45 #include "utilities/bitMap.inline.hpp"
  46 #include "utilities/macros.hpp"
  47 
  48 #ifdef ASSERT
  49 #define __ gen()->lir(__FILE__, __LINE__)->
  50 #else
  51 #define __ gen()->lir()->
  52 #endif
  53 
  54 #ifndef PATCHED_ADDR
  55 #define PATCHED_ADDR  (max_jint)
  56 #endif
  57 
  58 void PhiResolverState::reset() {


 194   ResolveNode* source = source_node(src);
 195   source->append(destination_node(dest));
 196 }
 197 
 198 
 199 //--------------------------------------------------------------
 200 // LIRItem
 201 
 202 void LIRItem::set_result(LIR_Opr opr) {
 203   assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
 204   value()->set_operand(opr);
 205 
 206   if (opr->is_virtual()) {
 207     _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
 208   }
 209 
 210   _result = opr;
 211 }
 212 
 213 void LIRItem::load_item() {
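       // Loading an item may allocate a fresh virtual register and, for
       // constants, cache the loaded register (see load_constant()); neither is
       // safe on a conditionally executed path, hence the assert below.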
 214   assert(!_gen->in_conditional_code(), "LIRItem cannot be loaded in conditional code");
 215 
 216   if (result()->is_illegal()) {
 217     // update the items result
 218     _result = value()->operand();
 219   }
 220   if (!result()->is_register()) {
 221     LIR_Opr reg = _gen->new_register(value()->type());
 222     __ move(result(), reg);
 223     if (result()->is_constant()) {
 224       _result = reg;
 225     } else {
 226       set_result(reg);
 227     }
 228   }
 229 }
 230 
 231 
 232 void LIRItem::load_for_store(BasicType type) {
 233   if (_gen->can_store_as_constant(value(), type)) {
 234     _result = value()->operand();
 235     if (!_result->is_constant()) {


 623     assert(right_op != result_op, "malformed");
 624     __ move(left_op, result_op);
 625     left_op = result_op;
 626   }
 627 
 628   switch(code) {
 629     case Bytecodes::_iand:
 630     case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;
 631 
 632     case Bytecodes::_ior:
 633     case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;
 634 
 635     case Bytecodes::_ixor:
 636     case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;
 637 
 638     default: ShouldNotReachHere();
 639   }
 640 }
 641 
 642 
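     // throw_imse_stub, when non-NULL, is taken if the locked object is a value
     // type: value types cannot be synchronized on, so
     // IllegalMonitorStateException must be thrown instead.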
 643 void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no,
 644                                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info, CodeStub* throw_imse_stub) {
 645   if (!GenerateSynchronizationCode) return;
 646   // for slow path, use debug info for state after successful locking
 647   CodeStub* slow_path = new MonitorEnterStub(object, lock, info, throw_imse_stub, scratch);
 648   __ load_stack_address_monitor(monitor_no, lock);
 649   // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
 650   __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception, throw_imse_stub);
 651 }
 652 
 653 
 654 void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
 655   if (!GenerateSynchronizationCode) return;
 656   // setup registers
 657   LIR_Opr hdr = lock;
 658   lock = new_hdr;
 659   CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
 660   __ load_stack_address_monitor(monitor_no, lock);
 661   __ unlock_object(hdr, object, lock, scratch, slow_path);
 662 }
 663 
 664 #ifndef PRODUCT
 665 void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
 666   if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
 667     tty->print_cr("   ###class not loaded at new bci %d", new_instance->printable_bci());
 668   } else if (PrintNotLoaded && (TieredCompilation && new_instance->is_unresolved())) {
 669     tty->print_cr("   ###class not resolved at new bci %d", new_instance->printable_bci());
 670   }


 772       if (src_type != NULL) {
 773         if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
 774           is_exact = true;
 775           expected_type = dst_type;
 776         }
 777       }
 778     }
 779     // at least pass along a good guess
 780     if (expected_type == NULL) expected_type = dst_exact_type;
 781     if (expected_type == NULL) expected_type = src_declared_type;
 782     if (expected_type == NULL) expected_type = dst_declared_type;
 783 
 784     src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
 785     dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
 786   }
 787 
 788   // if a probable array type has been identified, figure out if any
 789   // of the required checks for a fast case can be elided.
 790   int flags = LIR_OpArrayCopy::all_flags;
 791 
 792   if (!src->is_loaded_flattened_array() && !dst->is_loaded_flattened_array()) {
 793     flags &= ~LIR_OpArrayCopy::always_slow_path;
 794   }
 795   if (!src->maybe_flattened_array()) {
 796     flags &= ~LIR_OpArrayCopy::src_valuetype_check;
 797   }
 798   if (!dst->maybe_flattened_array() && !dst->maybe_null_free_array()) {
 799     flags &= ~LIR_OpArrayCopy::dst_valuetype_check;
 800   }
 801 
 802   if (!src_objarray)
 803     flags &= ~LIR_OpArrayCopy::src_objarray;
 804   if (!dst_objarray)
 805     flags &= ~LIR_OpArrayCopy::dst_objarray;
 806 
 807   if (!x->arg_needs_null_check(0))
 808     flags &= ~LIR_OpArrayCopy::src_null_check;
 809   if (!x->arg_needs_null_check(2))
 810     flags &= ~LIR_OpArrayCopy::dst_null_check;
 811 
 812 
 813   if (expected_type != NULL) {
 814     Value length_limit = NULL;
 815 
 816     IfOp* ifop = length->as_IfOp();
 817     if (ifop != NULL) {
 818       // look for expressions like min(v, a.length) which end up as
 819       //   x > y ? y : x  or  x >= y ? y : x
 820       if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
 821           ifop->x() == ifop->fval() &&


1419       case T_FLOAT:
1420         if (c->as_jint_bits() != other->as_jint_bits()) continue;
1421         break;
1422       case T_LONG:
1423       case T_DOUBLE:
1424         if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
1425         if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
1426         break;
1427       case T_OBJECT:
1428         if (c->as_jobject() != other->as_jobject()) continue;
1429         break;
1430       default:
1431         break;
1432       }
1433       return _reg_for_constants.at(i);
1434     }
1435   }
1436 
1437   LIR_Opr result = new_register(t);
1438   __ move((LIR_Opr)c, result);
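       // If this load is emitted on a conditionally executed path, the move
       // above may be skipped at run time, so the register must not be cached
       // for reuse by later, unconditionally executed code.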
1439   if (!in_conditional_code()) {
1440     _constants.append(c);
1441     _reg_for_constants.append(result);
1442   }
1443   return result;
1444 }
1445 
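     // Marks LIR emission that only executes conditionally (between a branch
     // and its join point). While set, load_constant() does not cache constant
     // registers and LIRItem::load_item() must not be used.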
1446 void LIRGenerator::set_in_conditional_code(bool v) {
1447   assert(v != _in_conditional_code, "must change state");
1448   _in_conditional_code = v;
1449 }
1450 
1451 
1452 //------------------------field access--------------------------------------
1453 
1454 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
1455   assert(x->number_of_arguments() == 4, "wrong type");
1456   LIRItem obj   (x->argument_at(0), this);  // object
1457   LIRItem offset(x->argument_at(1), this);  // offset of field
1458   LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
1459   LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
1460   assert(obj.type()->tag() == objectTag, "invalid type");
1461 
1462   // In 64-bit the type can be long; sparc doesn't have this assert
1463   // assert(offset.type()->tag() == intTag, "invalid type");
1464 
1465   assert(cmp.type()->tag() == type->tag(), "invalid type");
1466   assert(val.type()->tag() == type->tag(), "invalid type");
1467 
1468   LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
1469                                             obj, offset, cmp, val);
1470   set_result(x, result);
1471 }


1530       value.load_byte_item();
1531     } else  {
1532       value.load_item();
1533     }
1534   } else {
1535     value.load_for_store(field_type);
1536   }
1537 
1538   set_no_result(x);
1539 
1540 #ifndef PRODUCT
1541   if (PrintNotLoaded && needs_patching) {
1542     tty->print_cr("   ###class not loaded at store_%s bci %d",
1543                   x->is_static() ?  "static" : "field", x->printable_bci());
1544   }
1545 #endif
1546 
1547   if (x->needs_null_check() &&
1548       (needs_patching ||
1549        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1550     if (needs_patching && x->field()->is_flattenable()) {
1551       // We are storing a field of type "QT;" into holder class H, but H is not yet
1552       // loaded. (If H had been loaded, then T must also have already been loaded
1553       // due to the "Q" signature, and needs_patching would be false).
1554       assert(!x->field()->holder()->is_loaded(), "must be");
1555       // We don't know the offset of this field. Let's deopt and recompile.
1556       CodeStub* stub = new DeoptimizeStub(new CodeEmitInfo(info),
1557                                           Deoptimization::Reason_unloaded,
1558                                           Deoptimization::Action_make_not_entrant);
1559       __ branch(lir_cond_always, T_ILLEGAL, stub);
1560     } else {
1561       // Emit an explicit null check because the offset is too large.
1562       // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1563       // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1564       __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1565     }
1566   }
1567 
1568   DecoratorSet decorators = IN_HEAP;
1569   if (is_volatile) {
1570     decorators |= MO_SEQ_CST;
1571   }
1572   if (needs_patching) {
1573     decorators |= C1_NEEDS_PATCHING;
1574   }
1575 
1576   access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
1577                   value.result(), info != NULL ? new CodeEmitInfo(info) : NULL, info);
1578 }
1579 
1580 // FIXME -- I can't find any other way to pass an address to access_load_at().
1581 class TempResolvedAddress: public Instruction {
1582  public:
1583   TempResolvedAddress(ValueType* type, LIR_Opr addr) : Instruction(type) {
1584     set_operand(addr);
1585   }
1586   virtual void input_values_do(ValueVisitor*) {}
1587   virtual void visit(InstructionVisitor* v)   {}
1588   virtual const char* name() const  { return "TempResolvedAddress"; }
1589 };
1590 
1591 void LIRGenerator::access_flattened_array(bool is_load, LIRItem& array, LIRItem& index, LIRItem& obj_item) {
1592   // Find the starting address of the source (inside the array)
1593   ciType* array_type = array.value()->declared_type();
1594   ciValueArrayKlass* value_array_klass = array_type->as_value_array_klass();
1595   assert(value_array_klass->is_loaded(), "must be");
1596 
1597   ciValueKlass* elem_klass = value_array_klass->element_klass()->as_value_klass();
1598   int array_header_size = value_array_klass->array_header_in_bytes();
1599   int shift = value_array_klass->log2_element_size();
1600 
1601 #ifndef _LP64
1602   LIR_Opr index_op = new_register(T_INT);
1603   // FIXME -- on 32-bit, the shift below can overflow, so we need to check that
1604   // the top (shift+1) bits of index_op are zero, or
1605   // else throw ArrayIndexOutOfBoundsException
1606   if (index.result()->is_constant()) {
1607     jint const_index = index.result()->as_jint();
1608     __ move(LIR_OprFact::intConst(const_index << shift), index_op);
1609   } else {
1610     __ shift_left(index.result(), shift, index_op);
1611   }
1612 #else
1613   LIR_Opr index_op = new_register(T_LONG);
1614   if (index.result()->is_constant()) {
1615     jint const_index = index.result()->as_jint();
1616     __ move(LIR_OprFact::longConst(const_index << shift), index_op);
1617   } else {
1618     __ convert(Bytecodes::_i2l, index.result(), index_op);
1619     // Need to shift manually, as LIR_Address can scale only up to 3.
1620     __ shift_left(index_op, shift, index_op);
1621   }
1622 #endif
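       // index_op now holds the element's byte offset (index << shift); the
       // element address below adds the array header size on top of it.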
1623 
1624   LIR_Opr elm_op = new_pointer_register();
1625   LIR_Address* elm_address = new LIR_Address(array.result(), index_op, array_header_size, T_ADDRESS);
1626   __ leal(LIR_OprFact::address(elm_address), elm_op);
1627 
1628   for (int i = 0; i < elem_klass->nof_nonstatic_fields(); i++) {
1629     ciField* inner_field = elem_klass->nonstatic_field_at(i);
1630     assert(!inner_field->is_flattened(), "flattened fields must have been expanded");
1631     int obj_offset = inner_field->offset();
1632     int elm_offset = obj_offset - elem_klass->first_field_offset(); // object header is not stored in array.
1633 
1634     BasicType field_type = inner_field->type()->basic_type();
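         // Widen sub-int fields to T_INT so that the temporary register and
         // the load/store below operate on int-sized values.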
1635     switch (field_type) {
1636     case T_BYTE:
1637     case T_BOOLEAN:
1638     case T_SHORT:
1639     case T_CHAR:
1640       field_type = T_INT;
1641       break;
1642     default:
1643       break;
1644     }
1645 
1646     LIR_Opr temp = new_register(field_type);
1647     TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(field_type), elm_op);
1648     LIRItem elm_item(elm_resolved_addr, this);
1649 
1650     DecoratorSet decorators = IN_HEAP;
1651     if (is_load) {
1652       access_load_at(decorators, field_type,
1653                      elm_item, LIR_OprFact::intConst(elm_offset), temp,
1654                      NULL, NULL);
1655       access_store_at(decorators, field_type,
1656                       obj_item, LIR_OprFact::intConst(obj_offset), temp,
1657                       NULL, NULL);
1658     } else {
1659       access_load_at(decorators, field_type,
1660                      obj_item, LIR_OprFact::intConst(obj_offset), temp,
1661                      NULL, NULL);
1662       access_store_at(decorators, field_type,
1663                       elm_item, LIR_OprFact::intConst(elm_offset), temp,
1664                       NULL, NULL);
1665     }
1666   }
1667 }
1668 
1669 void LIRGenerator::check_flattened_array(LIR_Opr array, LIR_Opr value, CodeStub* slow_path) {
1670   LIR_Opr tmp = new_register(T_METADATA);
1671   __ check_flattened_array(array, value, tmp, slow_path);
1672 }
1673 
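     // A null-free array cannot hold null elements, so storing null into one
     // must throw NullPointerException; the value null check is only emitted on
     // the path where the runtime check identifies the array as null-free.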
1674 void LIRGenerator::check_null_free_array(LIRItem& array, LIRItem& value, CodeEmitInfo* info) {
1675   LabelObj* L_end = new LabelObj();
1676   LIR_Opr tmp = new_register(T_METADATA);
1677   __ check_null_free_array(array.result(), tmp);
1678   __ branch(lir_cond_equal, T_ILLEGAL, L_end->label());
1679   __ null_check(value.result(), info);
1680   __ branch_destination(L_end->label());
1681 }
1682 
1683 bool LIRGenerator::needs_flattened_array_store_check(StoreIndexed* x) {
1684   if (ValueArrayFlatten && x->elt_type() == T_OBJECT && x->array()->maybe_flattened_array()) {
1685     ciType* type = x->value()->declared_type();
1686     if (type != NULL && type->is_klass()) {
1687       ciKlass* klass = type->as_klass();
1688       if (klass->is_loaded() &&
1689           !(klass->is_valuetype() && klass->as_value_klass()->flatten_array()) &&
1690           !klass->is_java_lang_Object() &&
1691           !klass->is_interface()) {
1692         // This is known to be a non-flattenable object. If the array is flattened,
1693         // it will be caught by the code generated by array_store_check().
1694         return false;
1695       }
1696     }
1697     // We're not 100% sure, so let's do the flattened_array_store_check.
1698     return true;
1699   }
1700   return false;
1701 }
1702 
1703 bool LIRGenerator::needs_null_free_array_store_check(StoreIndexed* x) {
1704   return x->elt_type() == T_OBJECT && x->array()->maybe_null_free_array();
1705 }
1706 
1707 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1708   assert(x->is_pinned(),"");
1709   assert(x->elt_type() != T_ARRAY, "never used");
1710   bool is_loaded_flattened_array = x->array()->is_loaded_flattened_array();
1711   bool needs_range_check = x->compute_needs_range_check();
1712   bool use_length = x->length() != NULL;
1713   bool obj_store = x->elt_type() == T_OBJECT;
1714   bool needs_store_check = obj_store && !(is_loaded_flattened_array && x->is_exact_flattened_array_store()) &&
1715                                         (x->value()->as_Constant() == NULL ||
1716                                          !get_jobject_constant(x->value())->is_null_object() ||
1717                                          x->should_profile());
1718 
1719   LIRItem array(x->array(), this);
1720   LIRItem index(x->index(), this);
1721   LIRItem value(x->value(), this);
1722   LIRItem length(this);
1723 
1724   array.load_item();
1725   index.load_nonconstant();
1726 
1727   if (use_length && needs_range_check) {
1728     length.set_instruction(x->length());
1729     length.load_item();
1730   }
1731 
1732   if (needs_store_check || x->check_boolean()
1733       || is_loaded_flattened_array || needs_flattened_array_store_check(x) || needs_null_free_array_store_check(x)) {
1734     value.load_item();
1735   } else {
1736     value.load_for_store(x->elt_type());
1737   }
1738 
1739   set_no_result(x);
1740 
1741   // the CodeEmitInfo must be duplicated for each different
1742   // LIR-instruction because spilling can occur anywhere between two
1743   // instructions and so the debug information must be different
1744   CodeEmitInfo* range_check_info = state_for(x);
1745   CodeEmitInfo* null_check_info = NULL;
1746   if (x->needs_null_check()) {
1747     null_check_info = new CodeEmitInfo(range_check_info);
1748   }
1749 
1750   if (GenerateRangeChecks && needs_range_check) {
1751     if (use_length) {
1752       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1753       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
1754     } else {
1755       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1756       // range_check also does the null check
1757       null_check_info = NULL;
1758     }
1759   }
1760 
1761   if (GenerateArrayStoreCheck && needs_store_check) {
1762     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1763     array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1764   }
1765 
1766   if (is_loaded_flattened_array) {
1767     if (!x->value()->is_never_null()) {
1768       __ null_check(value.result(), new CodeEmitInfo(range_check_info));
1769     }
1770     access_flattened_array(false, array, index, value);
1771   } else {
1772     StoreFlattenedArrayStub* slow_path = NULL;
1773 
1774     if (needs_flattened_array_store_check(x)) {
1775       // Check if we indeed have a flattened array
1776       index.load_item();
1777       slow_path = new StoreFlattenedArrayStub(array.result(), index.result(), value.result(), state_for(x));
1778       check_flattened_array(array.result(), value.result(), slow_path);
1779       set_in_conditional_code(true);
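           // LIR emitted from here to the slow path's continuation is skipped
           // when check_flattened_array() branches to the slow path, so treat
           // it as conditional code (no constant-register caching).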
1780     } else if (needs_null_free_array_store_check(x)) {
1781       CodeEmitInfo* info = new CodeEmitInfo(range_check_info);
1782       check_null_free_array(array, value, info);
1783     }
1784 
1785     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1786     if (x->check_boolean()) {
1787       decorators |= C1_MASK_BOOLEAN;
1788     }
1789 
1790     access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
1791                     NULL, null_check_info);
1792     if (slow_path != NULL) {
1793       __ branch_destination(slow_path->continuation());
1794       set_in_conditional_code(false);
1795     }
1796   }
1797 }
1798 
1799 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
1800                                   LIRItem& base, LIR_Opr offset, LIR_Opr result,
1801                                   CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
1802   decorators |= ACCESS_READ;
1803   LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
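       // A raw access bypasses the GC barriers by invoking the BarrierSetC1
       // base implementation non-virtually.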
1804   if (access.is_raw()) {
1805     _barrier_set->BarrierSetC1::load_at(access, result);
1806   } else {
1807     _barrier_set->load_at(access, result);
1808   }
1809 }
1810 
1811 void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
1812                                LIR_Opr addr, LIR_Opr result) {
1813   decorators |= ACCESS_READ;
1814   LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
1815   access.set_resolved_addr(addr);
1816   if (access.is_raw()) {


1866   decorators |= ACCESS_WRITE;
1867   // Atomic operations are SEQ_CST by default
1868   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
1869   LIRAccess access(this, decorators, base, offset, type);
1870   if (access.is_raw()) {
1871     return _barrier_set->BarrierSetC1::atomic_add_at(access, value);
1872   } else {
1873     return _barrier_set->atomic_add_at(access, value);
1874   }
1875 }
1876 
1877 LIR_Opr LIRGenerator::access_resolve(DecoratorSet decorators, LIR_Opr obj) {
1878   // Use stronger ACCESS_WRITE|ACCESS_READ by default.
1879   if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
1880     decorators |= ACCESS_READ | ACCESS_WRITE;
1881   }
1882 
1883   return _barrier_set->resolve(this, decorators, obj);
1884 }
1885 
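     // Decides how to load a flattenable field whose holder or field type may
     // be unloaded: either emit a deoptimization (offset or flattening unknown),
     // or return the default value instance to substitute for an uninitialized
     // read.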
1886 Constant* LIRGenerator::flattenable_load_field_prolog(LoadField* x, CodeEmitInfo* info) {
1887   ciField* field = x->field();
1888   ciInstanceKlass* holder = field->holder();
1889   Constant* default_value = NULL;
1890 
1891   // Unloaded "QV;" klasses are represented by a ciInstanceKlass
1892   bool field_type_unloaded = field->type()->is_instance_klass() && !field->type()->as_instance_klass()->is_loaded();
1893 
1894   // Check for edge cases (1), (2) and (3) for getstatic and getfield
1895   bool deopt = false;
1896   bool need_default = false;
1897   if (field->is_static()) {
1898       // (1) holder is unloaded -- no problem: it will be loaded by patching, and field offset will be determined.
1899       // No check needed here.
1900 
1901     if (field_type_unloaded) {
1902       // (2) field type is unloaded -- problem: we don't know what the default value is. Let's deopt.
1903       //                               FIXME: consider getting the default value in patching code.
1904       deopt = true;
1905     } else {
1906       need_default = true;
1907     }
1908 
1909       // (3) field is not flattened -- we don't care: static fields are never flattened.
1910       // No check needed here.
1911   } else {
1912     if (!holder->is_loaded()) {
1913       // (1) holder is unloaded -- problem: we needed the field offset back in GraphBuilder::access_field()
1914       //                           FIXME: consider getting field offset in patching code (but only if the field
1915       //                           type was loaded at compilation time).
1916       deopt = true;
1917     } else if (field_type_unloaded) {
1918       // (2) field type is unloaded -- problem: we don't know whether it's flattened or not. Let's deopt
1919       deopt = true;
1920     } else if (!field->is_flattened()) {
1921       // (3) field is not flattened -- need default value in cases of uninitialized field
1922       need_default = true;
1923     }
1924   }
1925 
1926   if (deopt) {
1927     assert(!need_default, "deopt and need_default cannot both be true");
1928     assert(x->needs_patching(), "must be");
1929     assert(info != NULL, "must be");
1930     CodeStub* stub = new DeoptimizeStub(new CodeEmitInfo(info),
1931                                         Deoptimization::Reason_unloaded,
1932                                         Deoptimization::Action_make_not_entrant);
1933     __ branch(lir_cond_always, T_ILLEGAL, stub);
1934   } else if (need_default) {
1935     assert(!field_type_unloaded, "must be");
1936     assert(field->type()->is_valuetype(), "must be");
1937     ciValueKlass* value_klass = field->type()->as_value_klass();
1938     assert(value_klass->is_loaded(), "must be");
1939 
1940     if (field->is_static() && holder->is_loaded()) {
1941       ciInstance* mirror = field->holder()->java_mirror();
1942       ciObject* val = mirror->field_value(field).as_object();
1943       if (val->is_null_object()) {
1944         // This is a non-nullable static field, but it's not initialized.
1945         // We need to do a null check, and replace it with the default value.
1946       } else {
1947         // No need to perform null check on this static field
1948         need_default = false;
1949       }
1950     }
1951 
1952     if (need_default) {
1953       default_value = new Constant(new InstanceConstant(value_klass->default_value_instance()));
1954     }
1955   }
1956 
1957   return default_value;
1958 }
1959 
1960 void LIRGenerator::do_LoadField(LoadField* x) {
1961   bool needs_patching = x->needs_patching();
1962   bool is_volatile = x->field()->is_volatile();
1963   BasicType field_type = x->field_type();
1964 
1965   CodeEmitInfo* info = NULL;
1966   if (needs_patching) {
1967     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1968     info = state_for(x, x->state_before());
1969   } else if (x->needs_null_check()) {
1970     NullCheck* nc = x->explicit_null_check();
1971     if (nc == NULL) {
1972       info = state_for(x);
1973     } else {
1974       info = state_for(nc);
1975     }
1976   }
1977 
1978   LIRItem object(x->obj(), this);
1979 
1980   object.load_item();
1981 
1982 #ifndef PRODUCT
1983   if (PrintNotLoaded && needs_patching) {
1984     tty->print_cr("   ###class not loaded at load_%s bci %d",
1985                   x->is_static() ?  "static" : "field", x->printable_bci());
1986   }
1987 #endif
1988 
1989   Constant* default_value = NULL;
1990   if (x->field()->is_flattenable()) {
1991     default_value = flattenable_load_field_prolog(x, info);
1992   }
1993 
1994   bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1995   if (x->needs_null_check() &&
1996       (needs_patching ||
1997        MacroAssembler::needs_explicit_null_check(x->offset()) ||
1998        stress_deopt)) {
1999     LIR_Opr obj = object.result();
2000     if (stress_deopt) {
2001       obj = new_register(T_OBJECT);
2002       __ move(LIR_OprFact::oopConst(NULL), obj);
2003     }
2004     // Emit an explicit null check because the offset is too large.
2005     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
2006     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
2007     __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
2008   }
2009 
2010   DecoratorSet decorators = IN_HEAP;
2011   if (is_volatile) {
2012     decorators |= MO_SEQ_CST;
2013   }
2014   if (needs_patching) {
2015     decorators |= C1_NEEDS_PATCHING;
2016   }
2017 
2018   LIR_Opr result = rlock_result(x, field_type);
2019   access_load_at(decorators, field_type,
2020                  object, LIR_OprFact::intConst(x->offset()), result,
2021                  info ? new CodeEmitInfo(info) : NULL, info);
2022 
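       // A flattenable field must never expose NULL to Java code: a NULL read
       // means the field was never initialized, so substitute the default value
       // instance computed by flattenable_load_field_prolog().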
2023   if (default_value != NULL) {
2024     LabelObj* L_end = new LabelObj();
2025     __ cmp(lir_cond_notEqual, result, LIR_OprFact::oopConst(NULL));
2026     __ branch(lir_cond_notEqual, T_OBJECT, L_end->label());
2027     set_in_conditional_code(true);
2028     __ move(load_constant(default_value), result);
2029     __ branch_destination(L_end->label());
2030     set_in_conditional_code(false);
2031   }
2032 }
2033 
2034 
2035 //------------------------java.nio.Buffer.checkIndex------------------------
2036 
2037 // int java.nio.Buffer.checkIndex(int)
2038 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
2039   // NOTE: by the time we are in checkIndex() we are guaranteed that
2040   // the buffer is non-null (because checkIndex is package-private and
2041   // only called from within other methods in the buffer).
2042   assert(x->number_of_arguments() == 2, "wrong type");
2043   LIRItem buf  (x->argument_at(0), this);
2044   LIRItem index(x->argument_at(1), this);
2045   buf.load_item();
2046   index.load_item();
2047 
2048   LIR_Opr result = rlock_result(x);
2049   if (GenerateRangeChecks) {
2050     CodeEmitInfo* info = state_for(x);
2051     CodeStub* stub = new RangeCheckStub(info, index.result());


2126       __ move(LIR_OprFact::oopConst(NULL), obj);
2127       __ null_check(obj, new CodeEmitInfo(null_check_info));
2128     }
2129   }
2130 
2131   if (GenerateRangeChecks && needs_range_check) {
2132     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
2133       __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result(), array.result()));
2134     } else if (use_length) {
2135       // TODO: use a (modified) version of array_range_check that does not require a
2136       //       constant length to be loaded to a register
2137       __ cmp(lir_cond_belowEqual, length.result(), index.result());
2138       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
2139     } else {
2140       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
2141       // The range check performs the null check, so clear it out for the load
2142       null_check_info = NULL;
2143     }
2144   }
2145 
2146   if (x->vt() != NULL) {
2147     assert(x->array()->is_loaded_flattened_array(), "must be");
2148     // Find the destination address (of the NewValueTypeInstance).
2149     LIR_Opr obj = x->vt()->operand();
2150     LIRItem obj_item(x->vt(), this);
2151 
2152     access_flattened_array(true, array, index, obj_item);
2153     set_no_result(x);
2154   } else {
2155     LIR_Opr result = rlock_result(x, x->elt_type());
2156     LoadFlattenedArrayStub* slow_path = NULL;
2157 
2158     if (x->elt_type() == T_OBJECT && x->array()->maybe_flattened_array()) {
2159       index.load_item();
2160       // if we are loading from flattened array, load it using a runtime call
2161       slow_path = new LoadFlattenedArrayStub(array.result(), index.result(), result, state_for(x));
2162       check_flattened_array(array.result(), LIR_OprFact::illegalOpr, slow_path);
2163       set_in_conditional_code(true);
2164     }
2165 
2166     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
2167     access_load_at(decorators, x->elt_type(),
2168                    array, index.result(), result,
2169                    NULL, null_check_info);
2170 
2171     if (slow_path != NULL) {
2172       __ branch_destination(slow_path->continuation());
2173       set_in_conditional_code(false);
2174     }
2175   }
2176 }
2177 
2178 void LIRGenerator::do_WithField(WithField* x) {
2179   // This happens only when a class X uses the withfield bytecode to refer to
2180   // an inline class V, where V has not yet been loaded. This is not a common
2181   // case. Let's just deoptimize.
2182   CodeEmitInfo* info = state_for(x, x->state_before());
2183   CodeStub* stub = new DeoptimizeStub(new CodeEmitInfo(info),
2184                                       Deoptimization::Reason_unloaded,
2185                                       Deoptimization::Action_make_not_entrant);
2186   __ branch(lir_cond_always, T_ILLEGAL, stub);
2187   LIR_Opr reg = rlock_result(x, T_OBJECT);
2188   __ move(LIR_OprFact::oopConst(NULL), reg);
2189 }
2190 
2191 void LIRGenerator::do_DefaultValue(DefaultValue* x) {
2192   // Same as withfield above. Let's deoptimize.
2193   CodeEmitInfo* info = state_for(x, x->state_before());
2194   CodeStub* stub = new DeoptimizeStub(new CodeEmitInfo(info),
2195                                       Deoptimization::Reason_unloaded,
2196                                       Deoptimization::Action_make_not_entrant);
2197   __ branch(lir_cond_always, T_ILLEGAL, stub);
2198   LIR_Opr reg = rlock_result(x, T_OBJECT);
2199   __ move(LIR_OprFact::oopConst(NULL), reg);
2200 }
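     // In both deoptimization cases above the branch is unconditional, but a
     // result register is still defined and filled with a dummy NULL so that any
     // LIR referring to this instruction sees a legal, initialized operand.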
2201 
2202 void LIRGenerator::do_NullCheck(NullCheck* x) {
2203   if (x->can_trap()) {
2204     LIRItem value(x->obj(), this);
2205     value.load_item();
2206     CodeEmitInfo* info = state_for(x);
2207     __ null_check(value.result(), info);
2208   }
2209 }
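     // Note: on most platforms lir_null_check is implemented as an implicit check
     // (a faulting access to the object, recovered via the signal handler) rather
     // than an explicit compare-and-branch, which is why only a CodeEmitInfo for
     // the exception state is needed here.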
2210 
2211 
2212 void LIRGenerator::do_TypeCast(TypeCast* x) {
2213   LIRItem value(x->obj(), this);
2214   value.load_item();
2215   // the result is the same as that of the node we are casting
2216   set_result(x, value.result());
2217 }
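     // TypeCast emits no conversion code of its own: once the input value is
     // loaded, its operand is simply reused as the result of the cast.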
2218 
2219 
2220 void LIRGenerator::do_Throw(Throw* x) {


3026   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
3027     profile_parameters(x);
3028     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
3029     increment_invocation_counter(info);
3030   }
3031 
3032   // all blocks with a successor must end with an unconditional jump
3033   // to the successor even if they are consecutive
3034   __ jump(x->default_sux());
3035 }
3036 
3037 
3038 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
3039   // construct our frame and model the production of the incoming pointer
3040   // to the OSR buffer.
3041   __ osr_entry(LIR_Assembler::osrBufferPointer());
3042   LIR_Opr result = rlock_result(x);
3043   __ move(LIR_Assembler::osrBufferPointer(), result);
3044 }
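     // The OSR buffer pointer arrives in a fixed physical location; copying it
     // into a fresh virtual register right away lets the register allocator
     // manage its lifetime like any other value.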
3045 
3046 void LIRGenerator::invoke_load_one_argument(LIRItem* param, LIR_Opr loc) {
3047   if (loc->is_register()) {
3048     param->load_item_force(loc);
3049   } else {
3050     LIR_Address* addr = loc->as_address_ptr();
3051     param->load_for_store(addr->type());
3052     assert(addr->type() != T_VALUETYPE, "not supported yet");
3053     if (addr->type() == T_OBJECT) {
3054       __ move_wide(param->result(), addr);
3055     } else {
3056       if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3057         __ unaligned_move(param->result(), addr);
3058       } else {
3059         __ move(param->result(), addr);
3060       }
3061     }
3062   }
3063 }
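     // The separate T_LONG/T_DOUBLE case above exists because outgoing stack
     // argument slots are only guaranteed word alignment, so a 64-bit store may
     // need to be emitted as an unaligned (or split) move on some platforms.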
3064 
3065 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
3066   assert(args->length() == arg_list->length(),
3067          "args=%d, arg_list=%d", args->length(), arg_list->length());
3068   for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
3069     LIRItem* param = args->at(i);
3070     LIR_Opr loc = arg_list->at(i);
3071     invoke_load_one_argument(param, loc);













3072   }
3073 
3074   if (x->has_receiver()) {
3075     LIRItem* receiver = args->at(0);
3076     LIR_Opr loc = arg_list->at(0);
3077     if (loc->is_register()) {
3078       receiver->load_item_force(loc);
3079     } else {
3080       assert(loc->is_address(), "just checking");
3081       receiver->load_for_store(T_OBJECT);
3082       __ move_wide(receiver->result(), loc->as_address_ptr());
3083     }
3084   }
3085 }
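     // The receiver is loaded only after all other arguments, presumably so that
     // its fixed register (already constrained by the calling convention) is not
     // clobbered while the remaining argument moves are emitted.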
3086 
3087 
3088 // Visits all arguments, returns appropriate items without loading them
3089 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
3090   LIRItemList* argument_items = new LIRItemList();
3091   if (x->has_receiver()) {


3232   __ move(tmp, reg);
3233 }
3234 
3235 
3236 
3237 // Code for:  x->x() {x->cond()} x->y() ? x->tval() : x->fval()
3238 void LIRGenerator::do_IfOp(IfOp* x) {
3239 #ifdef ASSERT
3240   {
3241     ValueTag xtag = x->x()->type()->tag();
3242     ValueTag ttag = x->tval()->type()->tag();
3243     assert(xtag == intTag || xtag == objectTag, "cannot handle others");
3244     assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
3245     assert(ttag == x->fval()->type()->tag(), "cannot handle others");
3246   }
3247 #endif
3248 
3249   LIRItem left(x->x(), this);
3250   LIRItem right(x->y(), this);
3251   left.load_item();
3252   if (can_inline_as_constant(right.value()) && !x->substitutability_check()) {
3253     right.dont_load_item();
3254   } else {
3255     // substitutability_check() needs to use right as a base register.
3256     right.load_item();
3257   }
3258 
3259   LIRItem t_val(x->tval(), this);
3260   LIRItem f_val(x->fval(), this);
3261   t_val.dont_load_item();
3262   f_val.dont_load_item();

3263 
3264   if (x->substitutability_check()) {
3265     substitutability_check(x, left, right, t_val, f_val);
3266   } else {
3267     LIR_Opr reg = rlock_result(x);
3268     __ cmp(lir_cond(x->cond()), left.result(), right.result());
3269     __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
3270   }
3271 }
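     // For the non-substitutability path the generated LIR is branch-free, e.g.
     // for "x < y ? t : f":
     //
     //   cmp   less, x, y
     //   cmove less, t, f, result
     //
     // i.e. a compare followed by a conditional move on the same condition code.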
3272 
3273 void LIRGenerator::substitutability_check(IfOp* x, LIRItem& left, LIRItem& right, LIRItem& t_val, LIRItem& f_val) {
3274   assert(x->cond() == If::eql || x->cond() == If::neq, "must be");
3275   bool is_acmpeq = (x->cond() == If::eql);
3276   LIR_Opr equal_result     = is_acmpeq ? t_val.result() : f_val.result();
3277   LIR_Opr not_equal_result = is_acmpeq ? f_val.result() : t_val.result();
3278   LIR_Opr result = rlock_result(x);
3279   CodeEmitInfo* info = state_for(x, x->state_before());
3280 
3281   substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info);
3282 }
3283 
3284 void LIRGenerator::substitutability_check(If* x, LIRItem& left, LIRItem& right) {
3285   LIR_Opr equal_result     = LIR_OprFact::intConst(1);
3286   LIR_Opr not_equal_result = LIR_OprFact::intConst(0);
3287   LIR_Opr result = new_register(T_INT);
3288   CodeEmitInfo* info = state_for(x, x->state_before());
3289 
3290   substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info);
3291 
3292   assert(x->cond() == If::eql || x->cond() == If::neq, "must be");
3293   __ cmp(lir_cond(x->cond()), result, equal_result);
3294 }
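     // For an If, the outcome of the substitutability check is materialized as an
     // int (1 when substitutable) and the original eql/neq condition is then
     // re-established by comparing that int against 1, so the caller can emit its
     // usual conditional branch unchanged.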
3295 
3296 void LIRGenerator::substitutability_check_common(Value left_val, Value right_val, LIRItem& left, LIRItem& right,
3297                                                  LIR_Opr equal_result, LIR_Opr not_equal_result, LIR_Opr result,
3298                                                  CodeEmitInfo* info) {
3299   LIR_Opr tmp1 = LIR_OprFact::illegalOpr;
3300   LIR_Opr tmp2 = LIR_OprFact::illegalOpr;
3301   LIR_Opr left_klass_op = LIR_OprFact::illegalOpr;
3302   LIR_Opr right_klass_op = LIR_OprFact::illegalOpr;
3303 
3304   ciKlass* left_klass  = left_val ->as_loaded_klass_or_null();
3305   ciKlass* right_klass = right_val->as_loaded_klass_or_null();
3306 
3307   if ((left_klass == NULL || right_klass == NULL) || // The klass is still unloaded, or came from a Phi node.
3308       !left_klass->is_valuetype() || !right_klass->is_valuetype()) {
3309     init_temps_for_substitutability_check(tmp1, tmp2);
3310   }
3311 
3312   if (left_klass != NULL && left_klass->is_valuetype() && left_klass == right_klass) {
3313     // No need to load klass -- the operands are statically known to be the same value klass.
3314   } else {
3315     BasicType t_klass = UseCompressedOops ? T_INT : T_METADATA;
3316     left_klass_op = new_register(t_klass);
3317     right_klass_op = new_register(t_klass);
3318   }
3319 
3320   CodeStub* slow_path = new SubstitutabilityCheckStub(left.result(), right.result(), info);
3321   __ substitutability_check(result, left.result(), right.result(), equal_result, not_equal_result,
3322                             tmp1, tmp2,
3323                             left_klass, right_klass, left_klass_op, right_klass_op, info, slow_path);
3324 }
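     // The heavy lifting is done by the substitutability_check LIR op: it can
     // decide the common cases inline (for example when both operands are known
     // to share a value klass) and falls back to the SubstitutabilityCheckStub
     // runtime call only when the inline checks cannot decide. The temps are
     // reserved above only when a klass is unloaded or not statically known to
     // be a value type, matching the cases where extra scratch registers may be
     // needed.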
3325 
3326 #ifdef JFR_HAVE_INTRINSICS
3327 void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
3328   CodeEmitInfo* info = state_for(x);
3329   CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
3330 
3331   assert(info != NULL, "must have info");
3332   LIRItem arg(x->argument_at(0), this);
3333 
3334   arg.load_item();
3335   LIR_Opr klass = new_register(T_METADATA);
3336   __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), T_ADDRESS), klass, info);
3337   LIR_Opr id = new_register(T_LONG);
3338   ByteSize offset = KLASS_TRACE_ID_OFFSET;
3339   LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
3340 
3341   __ move(trace_id_addr, id);
3342   __ logical_or(id, LIR_OprFact::longConst(0x01L), id);
3343   __ store(id, trace_id_addr);

