src/share/vm/c1/c1_LIRGenerator.cpp

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Defs.hpp"
  27 #include "c1/c1_Compilation.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "ci/ciObjArray.hpp"
  36 #include "runtime/sharedRuntime.hpp"
  37 #include "runtime/stubRoutines.hpp"
  38 #include "utilities/bitMap.inline.hpp"
  39 #include "utilities/macros.hpp"
  40 #if INCLUDE_ALL_GCS
  41 #include "gc_implementation/g1/heapRegion.hpp"
  42 #endif // INCLUDE_ALL_GCS
  43 
  44 #ifdef ASSERT
  45 #define __ gen()->lir(__FILE__, __LINE__)->
  46 #else
  47 #define __ gen()->lir()->
  48 #endif
  49 
  50 #ifndef PATCHED_ADDR
  51 #define PATCHED_ADDR  (max_jint)
  52 #endif
  53 
  54 void PhiResolverState::reset(int max_vregs) {
  55   // Initialize array sizes


1212 
1213   const int referent_offset = java_lang_ref_Reference::referent_offset;
1214   guarantee(referent_offset > 0, "referent offset not initialized");
1215 
1216   assert(x->number_of_arguments() == 1, "wrong type");
1217 
1218   LIRItem reference(x->argument_at(0), this);
1219   reference.load_item();
1220 
1221   // need to perform the null check on the reference object
1222   CodeEmitInfo* info = NULL;
1223   if (x->needs_null_check()) {
1224     info = state_for(x);
1225   }
1226 
1227   LIR_Address* referent_field_adr =
1228     new LIR_Address(reference.result(), referent_offset, T_OBJECT);
1229 
1230   LIR_Opr result = rlock_result(x);
1231 
1232   __ load(referent_field_adr, result, info);
1233 
1234   // Register the value in the referent field with the pre-barrier
1235   pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
1236               result /* pre_val */,
1237               false  /* do_load */,
1238               false  /* patch */,
1239               NULL   /* info */);
1240 }
1241 
1242 // Example: clazz.isInstance(object)
1243 void LIRGenerator::do_isInstance(Intrinsic* x) {
1244   assert(x->number_of_arguments() == 2, "wrong type");
1245 
1246   // TODO could try to substitute this node with an equivalent InstanceOf
1247   // if clazz is known to be a constant Class. This will pick up newly found
1248   // constants after HIR construction. I'll leave this to a future change.
1249 
1250   // as a first cut, make a simple leaf call to runtime to stay platform independent.
1251   // could follow the aastore example in a future change.


1405   }
1406 
1407   LIR_Opr result = new_register(t);
1408   __ move((LIR_Opr)c, result);
1409   _constants.append(c);
1410   _reg_for_constants.append(result);
1411   return result;
1412 }
1413 
1414 // Various barriers
1415 
1416 void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1417                                bool do_load, bool patch, CodeEmitInfo* info) {
1418   // Do the pre-write barrier, if any.
1419   switch (_bs->kind()) {
1420 #if INCLUDE_ALL_GCS
1421     case BarrierSet::G1SATBCT:
1422     case BarrierSet::G1SATBCTLogging:
1423       G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
1424       break;

1425 #endif // INCLUDE_ALL_GCS
1426     case BarrierSet::CardTableModRef:
1427     case BarrierSet::CardTableExtension:
1428       // No pre barriers
1429       break;
1430     case BarrierSet::ModRef:
1431     case BarrierSet::Other:
1432       // No pre barriers
1433       break;
1434     default      :
1435       ShouldNotReachHere();
1436 
1437   }
1438 }
1439 
1440 void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1441   switch (_bs->kind()) {
1442 #if INCLUDE_ALL_GCS
1443     case BarrierSet::G1SATBCT:
1444     case BarrierSet::G1SATBCTLogging:
1445       G1SATBCardTableModRef_post_barrier(addr,  new_val);
1446       break;
1447 #endif // INCLUDE_ALL_GCS
1448     case BarrierSet::CardTableModRef:
1449     case BarrierSet::CardTableExtension:
1450       CardTableModRef_post_barrier(addr,  new_val);
1451       break;
1452     case BarrierSet::ModRef:
1453     case BarrierSet::Other:
1454       // No post barriers
1455       break;
1456     default      :
1457       ShouldNotReachHere();
1458     }
1459 }
1460 
1461 ////////////////////////////////////////////////////////////////////////
1462 #if INCLUDE_ALL_GCS
1463 
1464 void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1465                                                      bool do_load, bool patch, CodeEmitInfo* info) {
1466   // First we test whether marking is in progress.


1590     if (!address->index()->is_valid() && address->disp() == 0) {
1591       __ move(address->base(), ptr);
1592     } else {
1593       assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
1594       __ leal(addr, ptr);
1595     }
1596     addr = ptr;
1597   }
1598   assert(addr->is_register(), "must be a register at this point");
1599 
1600 #ifdef CARDTABLEMODREF_POST_BARRIER_HELPER
1601   CardTableModRef_post_barrier_helper(addr, card_table_base);
1602 #else
1603   LIR_Opr tmp = new_pointer_register();
1604   if (TwoOperandLIRForm) {
1605     __ move(addr, tmp);
1606     __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
1607   } else {
1608     __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
1609   }
1610   if (can_inline_as_constant(card_table_base)) {
1611     __ move(LIR_OprFact::intConst(0),
1612               new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
1613   } else {
1614     __ move(LIR_OprFact::intConst(0),
1615               new LIR_Address(tmp, load_constant(card_table_base),
1616                               T_BYTE));
1617   }
1618 #endif
1619 }
1620 
1621 
1622 //------------------------field access--------------------------------------
1623 
1624 // Comment copied from templateTable_i486.cpp
1625 // ----------------------------------------------------------------------------
1626 // Volatile variables demand their effects be made known to all CPUs in
1627 // order.  Store buffers on most chips allow reads & writes to reorder; the
1628 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1629 // memory barrier (i.e., it's not sufficient that the interpreter does not


1788       __ move(LIR_OprFact::oopConst(NULL), obj);
1789     }
1790     // Emit an explicit null check because the offset is too large.
1791     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1792     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1793     __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1794   }
1795 
1796   LIR_Opr reg = rlock_result(x, field_type);
1797   LIR_Address* address;
1798   if (needs_patching) {
1799     // we need to patch the offset in the instruction so don't allow
1800     // generate_address to try to be smart about emitting the -1.
1801     // Otherwise the patching code won't know how to find the
1802     // instruction to patch.
1803     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1804   } else {
1805     address = generate_address(object.result(), x->offset(), field_type);
1806   }
1807 
1808   if (is_volatile && !needs_patching) {
1809     volatile_field_load(address, reg, info);
1810   } else {
1811     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1812     __ load(address, reg, info, patch_code);
1813   }
1814 
1815   if (is_volatile && os::is_MP()) {
1816     __ membar_acquire();
1817   }

1818 }
1819 
1820 
1821 //------------------------java.nio.Buffer.checkIndex------------------------
1822 
1823 // int java.nio.Buffer.checkIndex(int)
1824 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1825   // NOTE: by the time we are in checkIndex() we are guaranteed that
1826   // the buffer is non-null (because checkIndex is package-private and
1827   // only called from within other methods in the buffer).
1828   assert(x->number_of_arguments() == 2, "wrong type");
1829   LIRItem buf  (x->argument_at(0), this);
1830   LIRItem index(x->argument_at(1), this);
1831   buf.load_item();
1832   index.load_item();
1833 
1834   LIR_Opr result = rlock_result(x);
1835   if (GenerateRangeChecks) {
1836     CodeEmitInfo* info = state_for(x);
1837     CodeStub* stub = new RangeCheckStub(info, index.result(), true);


1914   }
1915 
1916   // emit array address setup early so it schedules better
1917   LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
1918 
1919   if (GenerateRangeChecks && needs_range_check) {
1920     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1921       __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
1922     } else if (use_length) {
1923       // TODO: use a (modified) version of array_range_check that does not require a
1924       //       constant length to be loaded to a register
1925       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1926       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
1927     } else {
1928       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1929       // The range check performs the null check, so clear it out for the load
1930       null_check_info = NULL;
1931     }
1932   }
1933 
1934   __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
1935 }
1936 
1937 
1938 void LIRGenerator::do_NullCheck(NullCheck* x) {
1939   if (x->can_trap()) {
1940     LIRItem value(x->obj(), this);
1941     value.load_item();
1942     CodeEmitInfo* info = state_for(x);
1943     __ null_check(value.result(), info);
1944   }
1945 }
1946 
1947 
1948 void LIRGenerator::do_TypeCast(TypeCast* x) {
1949   LIRItem value(x->obj(), this);
1950   value.load_item();
1951   // the result is the same as from the node we are casting
1952   set_result(x, value.result());
1953 }
1954 


2088     }
2089   }
2090   // At this point base is a long non-constant
2091   // Index is a long register or an int constant.
2092   // We allow the constant to stay an int because that would allow us a more compact encoding by
2093   // embedding an immediate offset in the address expression. If we have a long constant, we have to
2094   // move it into a register first.
2095   assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant");
2096   assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
2097                             (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
2098 #endif
2099 
2100   BasicType dst_type = x->basic_type();
2101 
2102   LIR_Address* addr;
2103   if (index_op->is_constant()) {
2104     assert(log2_scale == 0, "must not have a scale");
2105     assert(index_op->type() == T_INT, "only int constants supported");
2106     addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
2107   } else {
2108 #ifdef X86
2109     addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
2110 #elif defined(GENERATE_ADDRESS_IS_PREFERRED)
2111     addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
2112 #else
2113     if (index_op->is_illegal() || log2_scale == 0) {
2114       addr = new LIR_Address(base_op, index_op, dst_type);
2115     } else {
2116       LIR_Opr tmp = new_pointer_register();
2117       __ shift_left(index_op, log2_scale, tmp);
2118       addr = new LIR_Address(base_op, tmp, dst_type);
2119     }
2120 #endif
2121   }
2122 
2123   if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
2124     __ unaligned_move(addr, reg);
2125   } else {
2126     if (dst_type == T_OBJECT && x->is_wide()) {
2127       __ move_wide(addr, reg);
2128     } else {


2203       index_op = tmp;
2204     }
2205   }
2206 
2207   LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2208 #endif // !GENERATE_ADDRESS_IS_PREFERRED
2209   __ move(value.result(), addr);
2210 }
2211 
2212 
2213 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2214   BasicType type = x->basic_type();
2215   LIRItem src(x->object(), this);
2216   LIRItem off(x->offset(), this);
2217 
2218   off.load_item();
2219   src.load_item();
2220 
2221   LIR_Opr value = rlock_result(x, x->basic_type());
2222 
2223   get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2224 
2225 #if INCLUDE_ALL_GCS
2226   // We might be reading the value of the referent field of a
2227   // Reference object in order to attach it back to the live
2228   // object graph. If G1 is enabled then we need to record
2229   // the value that is being returned in an SATB log buffer.
2230   //
2231   // We need to generate code similar to the following...
2232   //
2233   // if (offset == java_lang_ref_Reference::referent_offset) {
2234   //   if (src != NULL) {
2235   //     if (klass(src)->reference_type() != REF_NONE) {
2236   //       pre_barrier(..., value, ...);
2237   //     }
2238   //   }
2239   // }
2240 
2241   if (UseG1GC && type == T_OBJECT) {
2242     bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.
2243     bool gen_offset_check = true;    // Assume we need to generate the offset guard.
2244     bool gen_source_check = true;    // Assume we need to check the src object for null.
2245     bool gen_type_check = true;      // Assume we need to check the reference_type.
2246 
2247     if (off.is_constant()) {
2248       jlong off_con = (off.type()->is_int() ?
2249                         (jlong) off.get_jint_constant() :
2250                         off.get_jlong_constant());
2251 
2252 
2253       if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
2254         // The constant offset is something other than referent_offset.
2255         // We can skip generating/checking the remaining guards and
2256         // skip generation of the code stub.
2257         gen_pre_barrier = false;
2258       } else {
2259         // The constant offset is the same as referent_offset -
2260         // we do not need to generate a runtime offset check.
2261         gen_offset_check = false;




  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Defs.hpp"
  27 #include "c1/c1_Compilation.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "ci/ciObjArray.hpp"
  36 #include "gc_implementation/shenandoah/shenandoahHeap.hpp"
  37 #include "gc_implementation/shenandoah/c1/shenandoahBarrierSetC1.hpp"
  38 #include "runtime/sharedRuntime.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "utilities/bitMap.inline.hpp"
  41 #include "utilities/macros.hpp"
  42 #if INCLUDE_ALL_GCS
  43 #include "gc_implementation/g1/heapRegion.hpp"
  44 #endif // INCLUDE_ALL_GCS
  45 
  46 #ifdef ASSERT
  47 #define __ gen()->lir(__FILE__, __LINE__)->
  48 #else
  49 #define __ gen()->lir()->
  50 #endif
  51 
  52 #ifndef PATCHED_ADDR
  53 #define PATCHED_ADDR  (max_jint)
  54 #endif
  55 
  56 void PhiResolverState::reset(int max_vregs) {
  57   // Initialize array sizes


1214 
1215   const int referent_offset = java_lang_ref_Reference::referent_offset;
1216   guarantee(referent_offset > 0, "referent offset not initialized");
1217 
1218   assert(x->number_of_arguments() == 1, "wrong type");
1219 
1220   LIRItem reference(x->argument_at(0), this);
1221   reference.load_item();
1222 
1223   // need to perform the null check on the reference object
1224   CodeEmitInfo* info = NULL;
1225   if (x->needs_null_check()) {
1226     info = state_for(x);
1227   }
1228 
1229   LIR_Address* referent_field_adr =
1230     new LIR_Address(reference.result(), referent_offset, T_OBJECT);
1231 
1232   LIR_Opr result = rlock_result(x);
1233 
1234 #if INCLUDE_ALL_GCS
1235   if (UseShenandoahGC) {
1236     LIR_Opr tmp = new_register(T_OBJECT);
1237     __ load(referent_field_adr, tmp, info);
1238     tmp = ShenandoahBarrierSet::barrier_set()->bsc1()->load_reference_barrier(this, tmp);
1239     __ move(tmp, result);
1240   } else
1241 #endif
1242   __ load(referent_field_adr, result, info);
1243 
1244   // Register the value in the referent field with the pre-barrier
1245   pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
1246               result /* pre_val */,
1247               false  /* do_load */,
1248               false  /* patch */,
1249               NULL   /* info */);
1250 }
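
The Shenandoah branch above (new lines 1234-1241) is the oop-load pattern this patch applies at every reference load in the file: load the raw, possibly from-space oop into a scratch register, resolve it through the C1 load-reference barrier, and only then move it into the operand the rest of the IR sees. A minimal sketch of the shape, using only calls that appear in this webrev (names as in the patch, not a definitive API):

    LIR_Opr tmp = new_register(T_OBJECT);          // scratch for the raw load
    __ load(referent_field_adr, tmp, info);        // may observe a from-space oop
    // Resolve to the to-space copy before the value escapes.
    tmp = ShenandoahBarrierSet::barrier_set()->bsc1()->load_reference_barrier(this, tmp);
    __ move(tmp, result);                          // publish the resolved oop

Loading into a fresh temp rather than directly into result keeps the unresolved reference out of the register that other uses of the node observe.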
1251 
1252 // Example: clazz.isInstance(object)
1253 void LIRGenerator::do_isInstance(Intrinsic* x) {
1254   assert(x->number_of_arguments() == 2, "wrong type");
1255 
1256   // TODO could try to substitute this node with an equivalent InstanceOf
1257   // if clazz is known to be a constant Class. This will pick up newly found
1258   // constants after HIR construction. I'll leave this to a future change.
1259 
1260   // as a first cut, make a simple leaf call to runtime to stay platform independent.
1261   // could follow the aastore example in a future change.


1415   }
1416 
1417   LIR_Opr result = new_register(t);
1418   __ move((LIR_Opr)c, result);
1419   _constants.append(c);
1420   _reg_for_constants.append(result);
1421   return result;
1422 }
1423 
1424 // Various barriers
1425 
1426 void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1427                                bool do_load, bool patch, CodeEmitInfo* info) {
1428   // Do the pre-write barrier, if any.
1429   switch (_bs->kind()) {
1430 #if INCLUDE_ALL_GCS
1431     case BarrierSet::G1SATBCT:
1432     case BarrierSet::G1SATBCTLogging:
1433       G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
1434       break;
1435     case BarrierSet::ShenandoahBarrierSet:
1436       if (ShenandoahSATBBarrier) {
1437         G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
1438       }
1439       break;
1440 #endif // INCLUDE_ALL_GCS
1441     case BarrierSet::CardTableModRef:
1442     case BarrierSet::CardTableExtension:
1443       // No pre barriers
1444       break;
1445     case BarrierSet::ModRef:
1446     case BarrierSet::Other:
1447       // No pre barriers
1448       break;
1449     default      :
1450       ShouldNotReachHere();
1451 
1452   }
1453 }
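
The new ShenandoahBarrierSet case reuses the G1 SATB machinery wholesale: when ShenandoahSATBBarrier is enabled, the previous field value is logged through the same G1SATBCardTableModRef_pre_barrier path that G1 uses for snapshot-at-the-beginning marking; with the flag off, the case breaks without emitting anything. No Shenandoah-specific pre-barrier code is needed here.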
1454 
1455 void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1456   switch (_bs->kind()) {
1457 #if INCLUDE_ALL_GCS
1458     case BarrierSet::G1SATBCT:
1459     case BarrierSet::G1SATBCTLogging:
1460       G1SATBCardTableModRef_post_barrier(addr,  new_val);
1461       break;
1462     case BarrierSet::ShenandoahBarrierSet:
1463       ShenandoahBarrierSetC1::bsc1()->storeval_barrier(this, new_val, NULL, false);
1464       break;
1465 #endif // INCLUDE_ALL_GCS
1466     case BarrierSet::CardTableModRef:
1467     case BarrierSet::CardTableExtension:
1468       CardTableModRef_post_barrier(addr,  new_val);
1469       break;
1470     case BarrierSet::ModRef:
1471     case BarrierSet::Other:
1472       // No post barriers
1473       break;
1474     default      :
1475       ShouldNotReachHere();
1476     }
1477 }
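
post_barrier is the asymmetric half: instead of dirtying a card, the Shenandoah case applies a storeval barrier to the value being stored (ShenandoahBarrierSetC1::bsc1()->storeval_barrier(this, new_val, NULL, false)); the address operand is not consulted on that path, since Shenandoah keeps no card table.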
1478 
1479 ////////////////////////////////////////////////////////////////////////
1480 #if INCLUDE_ALL_GCS
1481 
1482 void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1483                                                      bool do_load, bool patch, CodeEmitInfo* info) {
1484   // First we test whether marking is in progress.


1608     if (!address->index()->is_valid() && address->disp() == 0) {
1609       __ move(address->base(), ptr);
1610     } else {
1611       assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
1612       __ leal(addr, ptr);
1613     }
1614     addr = ptr;
1615   }
1616   assert(addr->is_register(), "must be a register at this point");
1617 
1618 #ifdef CARDTABLEMODREF_POST_BARRIER_HELPER
1619   CardTableModRef_post_barrier_helper(addr, card_table_base);
1620 #else
1621   LIR_Opr tmp = new_pointer_register();
1622   if (TwoOperandLIRForm) {
1623     __ move(addr, tmp);
1624     __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
1625   } else {
1626     __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
1627   }
1628 
1629   if (UseConcMarkSweepGC && CMSPrecleaningEnabled) {
1630     __ membar_storestore();
1631   }
1632 
1633   if (can_inline_as_constant(card_table_base)) {
1634     __ move(LIR_OprFact::intConst(0),
1635               new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
1636   } else {
1637     __ move(LIR_OprFact::intConst(0),
1638               new LIR_Address(tmp, load_constant(card_table_base),
1639                               T_BYTE));
1640   }
1641 #endif
1642 }
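
The other functional change in this hunk is the store-store fence added for CMS (new lines 1629-1631). Presumably the ordering concern is the classic precleaning race: if the card-dirtying store could drift ahead of the reference store, a precleaning thread might scan and clean the card before the new oop is visible, and the update would be lost. The required emission order, as a sketch:

    // *field = new_val;                  // reference store, emitted earlier
    // membar_storestore();               // new: store must be visible first
    // card[addr >> card_shift] = 0;      // dirty the card afterwards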
1643 
1644 
1645 //------------------------field access--------------------------------------
1646 
1647 // Comment copied from templateTable_i486.cpp
1648 // ----------------------------------------------------------------------------
1649 // Volatile variables demand their effects be made known to all CPUs in
1650 // order.  Store buffers on most chips allow reads & writes to reorder; the
1651 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1652 // memory barrier (i.e., it's not sufficient that the interpreter does not


1811       __ move(LIR_OprFact::oopConst(NULL), obj);
1812     }
1813     // Emit an explicit null check because the offset is too large.
1814     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1815     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1816     __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1817   }
1818 
1819   LIR_Opr reg = rlock_result(x, field_type);
1820   LIR_Address* address;
1821   if (needs_patching) {
1822     // we need to patch the offset in the instruction so don't allow
1823     // generate_address to try to be smart about emitting the -1.
1824     // Otherwise the patching code won't know how to find the
1825     // instruction to patch.
1826     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1827   } else {
1828     address = generate_address(object.result(), x->offset(), field_type);
1829   }
1830 
1831 #if INCLUDE_ALL_GCS
1832   if (UseShenandoahGC && (field_type == T_OBJECT || field_type == T_ARRAY)) {
1833     LIR_Opr tmp = new_register(T_OBJECT);
1834     if (is_volatile && !needs_patching) {
1835       volatile_field_load(address, tmp, info);
1836     } else {
1837       LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1838       __ load(address, tmp, info, patch_code);
1839     }
1840     if (is_volatile && os::is_MP()) {
1841       __ membar_acquire();
1842     }
1843     tmp = ShenandoahBarrierSet::barrier_set()->bsc1()->load_reference_barrier(this, tmp);
1844     __ move(tmp, reg);
1845   } else
1846 #endif
1847   {
1848   if (is_volatile && !needs_patching) {
1849     volatile_field_load(address, reg, info);
1850   } else {
1851     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1852     __ load(address, reg, info, patch_code);
1853   }

1854   if (is_volatile && os::is_MP()) {
1855     __ membar_acquire();
1856   }
1857   }
1858 }
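
do_LoadField follows the same shape as do_Reference_get: on the Shenandoah path both the volatile and the patched load go into a T_OBJECT temp, membar_acquire keeps its original position after the load, and the resolved oop is moved into reg last. Note the original non-Shenandoah code is wrapped in a brace block without re-indenting (new lines 1847-1857), evidently to keep the textual diff small.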
1859 
1860 
1861 //------------------------java.nio.Buffer.checkIndex------------------------
1862 
1863 // int java.nio.Buffer.checkIndex(int)
1864 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1865   // NOTE: by the time we are in checkIndex() we are guaranteed that
1866   // the buffer is non-null (because checkIndex is package-private and
1867   // only called from within other methods in the buffer).
1868   assert(x->number_of_arguments() == 2, "wrong type");
1869   LIRItem buf  (x->argument_at(0), this);
1870   LIRItem index(x->argument_at(1), this);
1871   buf.load_item();
1872   index.load_item();
1873 
1874   LIR_Opr result = rlock_result(x);
1875   if (GenerateRangeChecks) {
1876     CodeEmitInfo* info = state_for(x);
1877     CodeStub* stub = new RangeCheckStub(info, index.result(), true);


1954   }
1955 
1956   // emit array address setup early so it schedules better
1957   LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
1958 
1959   if (GenerateRangeChecks && needs_range_check) {
1960     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1961       __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
1962     } else if (use_length) {
1963       // TODO: use a (modified) version of array_range_check that does not require a
1964       //       constant length to be loaded to a register
1965       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1966       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
1967     } else {
1968       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1969       // The range check performs the null check, so clear it out for the load
1970       null_check_info = NULL;
1971     }
1972   }
1973 
1974   LIR_Opr result = rlock_result(x, x->elt_type());
1975 
1976 #if INCLUDE_ALL_GCS
1977   if (UseShenandoahGC && (x->elt_type() == T_OBJECT || x->elt_type() == T_ARRAY)) {
1978     LIR_Opr tmp = new_register(T_OBJECT);
1979     __ move(array_addr, tmp, null_check_info);
1980     tmp = ShenandoahBarrierSet::barrier_set()->bsc1()->load_reference_barrier(this, tmp);
1981     __ move(tmp, result);
1982   } else
1983 #endif
1984   __ move(array_addr, result, null_check_info);
1985 
1986 }
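
In do_LoadIndexed the result operand is now materialized up front via rlock_result (new line 1974) so that the Shenandoah arm and the plain move target the same operand; in the old version (line 1934) rlock_result was folded directly into the move.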
1987 
1988 
1989 void LIRGenerator::do_NullCheck(NullCheck* x) {
1990   if (x->can_trap()) {
1991     LIRItem value(x->obj(), this);
1992     value.load_item();
1993     CodeEmitInfo* info = state_for(x);
1994     __ null_check(value.result(), info);
1995   }
1996 }
1997 
1998 
1999 void LIRGenerator::do_TypeCast(TypeCast* x) {
2000   LIRItem value(x->obj(), this);
2001   value.load_item();
2002   // the result is the same as from the node we are casting
2003   set_result(x, value.result());
2004 }
2005 


2139     }
2140   }
2141   // At this point base is a long non-constant
2142   // Index is a long register or an int constant.
2143   // We allow the constant to stay an int because that would allow us a more compact encoding by
2144   // embedding an immediate offset in the address expression. If we have a long constant, we have to
2145   // move it into a register first.
2146   assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant");
2147   assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
2148                             (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
2149 #endif
2150 
2151   BasicType dst_type = x->basic_type();
2152 
2153   LIR_Address* addr;
2154   if (index_op->is_constant()) {
2155     assert(log2_scale == 0, "must not have a scale");
2156     assert(index_op->type() == T_INT, "only int constants supported");
2157     addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
2158   } else {
2159 #if defined(X86) || defined(AARCH64)
2160     addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
2161 #elif defined(GENERATE_ADDRESS_IS_PREFERRED)
2162     addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
2163 #else
2164     if (index_op->is_illegal() || log2_scale == 0) {
2165       addr = new LIR_Address(base_op, index_op, dst_type);
2166     } else {
2167       LIR_Opr tmp = new_pointer_register();
2168       __ shift_left(index_op, log2_scale, tmp);
2169       addr = new LIR_Address(base_op, tmp, dst_type);
2170     }
2171 #endif
2172   }
2173 
2174   if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
2175     __ unaligned_move(addr, reg);
2176   } else {
2177     if (dst_type == T_OBJECT && x->is_wide()) {
2178       __ move_wide(addr, reg);
2179     } else {


2254       index_op = tmp;
2255     }
2256   }
2257 
2258   LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2259 #endif // !GENERATE_ADDRESS_IS_PREFERRED
2260   __ move(value.result(), addr);
2261 }
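
A platform tweak rides along in this hunk: the scaled LIR_Address fast path that was guarded by #ifdef X86 (old line 2108) is now taken on AArch64 as well (new line 2159), both ISAs being able to encode base + (index << scale) addressing directly; other platforms still fall back to an explicit shift_left into a temporary.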
2262 
2263 
2264 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2265   BasicType type = x->basic_type();
2266   LIRItem src(x->object(), this);
2267   LIRItem off(x->offset(), this);
2268 
2269   off.load_item();
2270   src.load_item();
2271 
2272   LIR_Opr value = rlock_result(x, x->basic_type());
2273 
2274 #if INCLUDE_ALL_GCS
2275   if (UseShenandoahGC && (type == T_OBJECT || type == T_ARRAY)) {
2276     LIR_Opr tmp = new_register(T_OBJECT);
2277     get_Object_unsafe(tmp, src.result(), off.result(), type, x->is_volatile());
2278     tmp = ShenandoahBarrierSet::barrier_set()->bsc1()->load_reference_barrier(this, tmp);
2279     __ move(tmp, value);
2280   } else
2281 #endif
2282   get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2283 
2284 #if INCLUDE_ALL_GCS
2285   // We might be reading the value of the referent field of a
2286   // Reference object in order to attach it back to the live
2287   // object graph. If G1 is enabled then we need to record
2288   // the value that is being returned in an SATB log buffer.
2289   //
2290   // We need to generate code similar to the following...
2291   //
2292   // if (offset == java_lang_ref_Reference::referent_offset) {
2293   //   if (src != NULL) {
2294   //     if (klass(src)->reference_type() != REF_NONE) {
2295   //       pre_barrier(..., value, ...);
2296   //     }
2297   //   }
2298   // }
2299 
2300   if ((UseShenandoahGC || UseG1GC) && type == T_OBJECT) {
2301     bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.
2302     bool gen_offset_check = true;    // Assume we need to generate the offset guard.
2303     bool gen_source_check = true;    // Assume we need to check the src object for null.
2304     bool gen_type_check = true;      // Assume we need to check the reference_type.
2305 
2306     if (off.is_constant()) {
2307       jlong off_con = (off.type()->is_int() ?
2308                         (jlong) off.get_jint_constant() :
2309                         off.get_jlong_constant());
2310 
2311 
2312       if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
2313         // The constant offset is something other than referent_offset.
2314         // We can skip generating/checking the remaining guards and
2315         // skip generation of the code stub.
2316         gen_pre_barrier = false;
2317       } else {
2318         // The constant offset is the same as referent_offset -
2319         // we do not need to generate a runtime offset check.
2320         gen_offset_check = false;
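
The guard-elision logic that begins here (and continues past the end of this page) is the most subtle part of the hunk: gen_pre_barrier, gen_offset_check, gen_source_check and gen_type_check all start out true and are peeled off one by one as each condition is proven statically, a constant offset being the first opportunity. The only Shenandoah change is the guard itself: SATB logging of a potentially resurrected referent is needed under Shenandoah for the same reason as under G1, hence the condition (UseShenandoahGC || UseG1GC).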