
src/share/vm/c1/c1_LIRGenerator.cpp





  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Defs.hpp"
  27 #include "c1/c1_Compilation.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "ci/ciObjArray.hpp"
  36 #include "runtime/sharedRuntime.hpp"
  37 #include "runtime/stubRoutines.hpp"
  38 #include "utilities/bitMap.inline.hpp"
  39 #include "utilities/macros.hpp"
  40 #if INCLUDE_ALL_GCS
  41 #include "gc_implementation/g1/heapRegion.hpp"
  42 #endif // INCLUDE_ALL_GCS
  43 
  44 #ifdef ASSERT
  45 #define __ gen()->lir(__FILE__, __LINE__)->
  46 #else
  47 #define __ gen()->lir()->
  48 #endif
  49 
  50 #ifndef PATCHED_ADDR
  51 #define PATCHED_ADDR  (max_jint)
  52 #endif
  53 
  54 void PhiResolverState::reset(int max_vregs) {
  55   // Initialize array sizes


1212 
1213   const int referent_offset = java_lang_ref_Reference::referent_offset;
1214   guarantee(referent_offset > 0, "referent offset not initialized");
1215 
1216   assert(x->number_of_arguments() == 1, "wrong type");
1217 
1218   LIRItem reference(x->argument_at(0), this);
1219   reference.load_item();
1220 
1221   // need to perform the null check on the reference object
1222   CodeEmitInfo* info = NULL;
1223   if (x->needs_null_check()) {
1224     info = state_for(x);
1225   }
1226 
1227   LIR_Address* referent_field_adr =
1228     new LIR_Address(reference.result(), referent_offset, T_OBJECT);
1229 
1230   LIR_Opr result = rlock_result(x);
1231 
1232   __ load(referent_field_adr, result, info);
1233 
1234   // Register the value in the referent field with the pre-barrier
1235   pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
1236               result /* pre_val */,
1237               false  /* do_load */,
1238               false  /* patch */,
1239               NULL   /* info */);
1240 }
1241 
1242 // Example: clazz.isInstance(object)
1243 void LIRGenerator::do_isInstance(Intrinsic* x) {
1244   assert(x->number_of_arguments() == 2, "wrong type");
1245 
1246   // TODO could try to substitute this node with an equivalent InstanceOf
1247   // if clazz is known to be a constant Class. This will pick up newly found
1248   // constants after HIR construction. I'll leave this to a future change.
1249 
1250   // as a first cut, make a simple leaf call to runtime to stay platform independent.
1251   // could follow the aastore example in a future change.


1405   }
1406 
1407   LIR_Opr result = new_register(t);
1408   __ move((LIR_Opr)c, result);
1409   _constants.append(c);
1410   _reg_for_constants.append(result);
1411   return result;
1412 }
1413 
1414 // Various barriers
1415 
1416 void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1417                                bool do_load, bool patch, CodeEmitInfo* info) {
1418   // Do the pre-write barrier, if any.
1419   switch (_bs->kind()) {
1420 #if INCLUDE_ALL_GCS
1421     case BarrierSet::G1SATBCT:
1422     case BarrierSet::G1SATBCTLogging:
1423       G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
1424       break;
1425 #endif // INCLUDE_ALL_GCS
1426     case BarrierSet::CardTableModRef:
1427     case BarrierSet::CardTableExtension:
1428       // No pre barriers
1429       break;
1430     case BarrierSet::ModRef:
1431     case BarrierSet::Other:
1432       // No pre barriers
1433       break;
1434     default      :
1435       ShouldNotReachHere();
1436 
1437   }
1438 }
1439 
1440 void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1441   switch (_bs->kind()) {
1442 #if INCLUDE_ALL_GCS
1443     case BarrierSet::G1SATBCT:
1444     case BarrierSet::G1SATBCTLogging:
1445       G1SATBCardTableModRef_post_barrier(addr,  new_val);
1446       break;
1447 #endif // INCLUDE_ALL_GCS
1448     case BarrierSet::CardTableModRef:
1449     case BarrierSet::CardTableExtension:
1450       CardTableModRef_post_barrier(addr,  new_val);
1451       break;
1452     case BarrierSet::ModRef:
1453     case BarrierSet::Other:
1454       // No post barriers
1455       break;
1456     default      :
1457       ShouldNotReachHere();
1458     }
1459 }
1460 
1461 ////////////////////////////////////////////////////////////////////////
1462 #if INCLUDE_ALL_GCS
1463 
1464 void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1465                                                      bool do_load, bool patch, CodeEmitInfo* info) {
1466   // First we test whether marking is in progress.


1793       __ move(LIR_OprFact::oopConst(NULL), obj);
1794     }
1795     // Emit an explicit null check because the offset is too large.
1796     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1797     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1798     __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1799   }
1800 
1801   LIR_Opr reg = rlock_result(x, field_type);
1802   LIR_Address* address;
1803   if (needs_patching) {
1804     // we need to patch the offset in the instruction so don't allow
1805     // generate_address to try to be smart about emitting the -1.
1806     // Otherwise the patching code won't know how to find the
1807     // instruction to patch.
1808     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1809   } else {
1810     address = generate_address(object.result(), x->offset(), field_type);
1811   }
1812 
1813   if (is_volatile && !needs_patching) {
1814     volatile_field_load(address, reg, info);
1815   } else {
1816     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1817     __ load(address, reg, info, patch_code);
1818   }
1819 
1820   if (is_volatile && os::is_MP()) {
1821     __ membar_acquire();
1822   }

1823 }
1824 
1825 
1826 //------------------------java.nio.Buffer.checkIndex------------------------
1827 
1828 // int java.nio.Buffer.checkIndex(int)
1829 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1830   // NOTE: by the time we are in checkIndex() we are guaranteed that
1831   // the buffer is non-null (because checkIndex is package-private and
1832   // only called from within other methods in the buffer).
1833   assert(x->number_of_arguments() == 2, "wrong type");
1834   LIRItem buf  (x->argument_at(0), this);
1835   LIRItem index(x->argument_at(1), this);
1836   buf.load_item();
1837   index.load_item();
1838 
1839   LIR_Opr result = rlock_result(x);
1840   if (GenerateRangeChecks) {
1841     CodeEmitInfo* info = state_for(x);
1842     CodeStub* stub = new RangeCheckStub(info, index.result(), true);


1919   }
1920 
1921   // emit array address setup early so it schedules better
1922   LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
1923 
1924   if (GenerateRangeChecks && needs_range_check) {
1925     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1926       __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
1927     } else if (use_length) {
1928       // TODO: use a (modified) version of array_range_check that does not require a
1929       //       constant length to be loaded to a register
1930       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1931       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
1932     } else {
1933       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1934       // The range check performs the null check, so clear it out for the load
1935       null_check_info = NULL;
1936     }
1937   }
1938 
1939   __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
1940 }
1941 
1942 
1943 void LIRGenerator::do_NullCheck(NullCheck* x) {
1944   if (x->can_trap()) {
1945     LIRItem value(x->obj(), this);
1946     value.load_item();
1947     CodeEmitInfo* info = state_for(x);
1948     __ null_check(value.result(), info);
1949   }
1950 }
1951 
1952 
1953 void LIRGenerator::do_TypeCast(TypeCast* x) {
1954   LIRItem value(x->obj(), this);
1955   value.load_item();
1956   // the result is the same as from the node we are casting
1957   set_result(x, value.result());
1958 }
1959 


2208       index_op = tmp;
2209     }
2210   }
2211 
2212   LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2213 #endif // !GENERATE_ADDRESS_IS_PREFERRED
2214   __ move(value.result(), addr);
2215 }
2216 
2217 
2218 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2219   BasicType type = x->basic_type();
2220   LIRItem src(x->object(), this);
2221   LIRItem off(x->offset(), this);
2222 
2223   off.load_item();
2224   src.load_item();
2225 
2226   LIR_Opr value = rlock_result(x, x->basic_type());
2227 
2228   get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2229 
2230 #if INCLUDE_ALL_GCS
2231   // We might be reading the value of the referent field of a
2232   // Reference object in order to attach it back to the live
2233   // object graph. If G1 is enabled then we need to record
2234   // the value that is being returned in an SATB log buffer.
2235   //
2236   // We need to generate code similar to the following...
2237   //
2238   // if (offset == java_lang_ref_Reference::referent_offset) {
2239   //   if (src != NULL) {
2240   //     if (klass(src)->reference_type() != REF_NONE) {
2241   //       pre_barrier(..., value, ...);
2242   //     }
2243   //   }
2244   // }
2245 
2246   if (UseG1GC && type == T_OBJECT) {
2247     bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.
2248     bool gen_offset_check = true;    // Assume we need to generate the offset guard.
2249     bool gen_source_check = true;    // Assume we need to check the src object for null.
2250     bool gen_type_check = true;      // Assume we need to check the reference_type.
2251 
2252     if (off.is_constant()) {
2253       jlong off_con = (off.type()->is_int() ?
2254                         (jlong) off.get_jint_constant() :
2255                         off.get_jlong_constant());
2256 
2257 
2258       if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
2259         // The constant offset is something other than referent_offset.
2260         // We can skip generating/checking the remaining guards and
2261         // skip generation of the code stub.
2262         gen_pre_barrier = false;
2263       } else {
2264         // The constant offset is the same as referent_offset -
2265         // we do not need to generate a runtime offset check.
2266         gen_offset_check = false;




  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Defs.hpp"
  27 #include "c1/c1_Compilation.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "ci/ciObjArray.hpp"
  36 #include "gc_implementation/shenandoah/shenandoahHeap.hpp"
  37 #include "gc_implementation/shenandoah/c1/shenandoahBarrierSetC1.hpp"
  38 #include "runtime/sharedRuntime.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "utilities/bitMap.inline.hpp"
  41 #include "utilities/macros.hpp"
  42 #if INCLUDE_ALL_GCS
  43 #include "gc_implementation/g1/heapRegion.hpp"
  44 #endif // INCLUDE_ALL_GCS
  45 
  46 #ifdef ASSERT
  47 #define __ gen()->lir(__FILE__, __LINE__)->
  48 #else
  49 #define __ gen()->lir()->
  50 #endif
  51 
  52 #ifndef PATCHED_ADDR
  53 #define PATCHED_ADDR  (max_jint)
  54 #endif
  55 
  56 void PhiResolverState::reset(int max_vregs) {
  57   // Initialize array sizes


1214 
1215   const int referent_offset = java_lang_ref_Reference::referent_offset;
1216   guarantee(referent_offset > 0, "referent offset not initialized");
1217 
1218   assert(x->number_of_arguments() == 1, "wrong type");
1219 
1220   LIRItem reference(x->argument_at(0), this);
1221   reference.load_item();
1222 
1223   // need to perform the null check on the reference object
1224   CodeEmitInfo* info = NULL;
1225   if (x->needs_null_check()) {
1226     info = state_for(x);
1227   }
1228 
1229   LIR_Address* referent_field_adr =
1230     new LIR_Address(reference.result(), referent_offset, T_OBJECT);
1231 
1232   LIR_Opr result = rlock_result(x);
1233 
1234 #if INCLUDE_ALL_GCS
1235   if (UseShenandoahGC) {
1236     LIR_Opr tmp = new_register(T_OBJECT);
1237     LIR_Opr addr = ShenandoahBarrierSet::barrier_set()->bsc1()->resolve_address(this, referent_field_adr, T_OBJECT, NULL);
1238     __ load(addr->as_address_ptr(), tmp, info);
1239     tmp = ShenandoahBarrierSet::barrier_set()->bsc1()->load_reference_barrier(this, tmp, addr);
1240     __ move(tmp, result);
1241   } else
1242 #endif
1243   __ load(referent_field_adr, result, info);
1244 
1245   // Register the value in the referent field with the pre-barrier
1246   pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
1247               result /* pre_val */,
1248               false  /* do_load */,
1249               false  /* patch */,
1250               NULL   /* info */);
1251 }
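
For orientation, the Shenandoah branch above follows a pattern repeated throughout this change: resolve the address, load the oop into a temporary, pass the temporary through the load-reference-barrier, and only then move it into the result operand. The standalone C++ sketch below models just that load-then-barrier-then-publish shape; Oop, load_reference_barrier and load_referent are invented stand-ins rather than HotSpot types or APIs, and the barrier here is a pass-through where the real one may return a forwarded copy of the object.

#include <cstdio>

// Illustrative stand-ins only; these are not HotSpot types or functions.
struct Oop { int payload; };

// Placeholder read barrier: a real load-reference-barrier may hand back a
// forwarded (to-space) copy of the object; this sketch passes it through.
static Oop* load_reference_barrier(Oop* raw) {
  return raw;
}

// Shape of the generated LIR above: raw load into a temporary, run the
// barrier, then publish the barriered value as the result.
static Oop* load_referent(Oop** field_addr) {
  Oop* tmp = *field_addr;             // plain load of the oop field
  tmp = load_reference_barrier(tmp);  // barrier before the oop escapes
  return tmp;                         // move the barriered oop to the result
}

int main() {
  Oop referent = { 42 };
  Oop* field = &referent;
  std::printf("payload = %d\n", load_referent(&field)->payload);
  return 0;
}
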
1252 
1253 // Example: clazz.isInstance(object)
1254 void LIRGenerator::do_isInstance(Intrinsic* x) {
1255   assert(x->number_of_arguments() == 2, "wrong type");
1256 
1257   // TODO could try to substitute this node with an equivalent InstanceOf
1258   // if clazz is known to be a constant Class. This will pick up newly found
1259   // constants after HIR construction. I'll leave this to a future change.
1260 
1261   // as a first cut, make a simple leaf call to runtime to stay platform independent.
1262   // could follow the aastore example in a future change.


1416   }
1417 
1418   LIR_Opr result = new_register(t);
1419   __ move((LIR_Opr)c, result);
1420   _constants.append(c);
1421   _reg_for_constants.append(result);
1422   return result;
1423 }
1424 
1425 // Various barriers
1426 
1427 void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1428                                bool do_load, bool patch, CodeEmitInfo* info) {
1429   // Do the pre-write barrier, if any.
1430   switch (_bs->kind()) {
1431 #if INCLUDE_ALL_GCS
1432     case BarrierSet::G1SATBCT:
1433     case BarrierSet::G1SATBCTLogging:
1434       G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
1435       break;
1436     case BarrierSet::ShenandoahBarrierSet:
1437       if (ShenandoahSATBBarrier) {
1438         G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
1439       }
1440       break;
1441 #endif // INCLUDE_ALL_GCS
1442     case BarrierSet::CardTableModRef:
1443     case BarrierSet::CardTableExtension:
1444       // No pre barriers
1445       break;
1446     case BarrierSet::ModRef:
1447     case BarrierSet::Other:
1448       // No pre barriers
1449       break;
1450     default      :
1451       ShouldNotReachHere();
1452 
1453   }
1454 }
1455 
1456 void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1457   switch (_bs->kind()) {
1458 #if INCLUDE_ALL_GCS
1459     case BarrierSet::G1SATBCT:
1460     case BarrierSet::G1SATBCTLogging:
1461       G1SATBCardTableModRef_post_barrier(addr,  new_val);
1462       break;
1463     case BarrierSet::ShenandoahBarrierSet:
1464       ShenandoahBarrierSetC1::bsc1()->storeval_barrier(this, new_val, NULL, false);
1465       break;
1466 #endif // INCLUDE_ALL_GCS
1467     case BarrierSet::CardTableModRef:
1468     case BarrierSet::CardTableExtension:
1469       CardTableModRef_post_barrier(addr,  new_val);
1470       break;
1471     case BarrierSet::ModRef:
1472     case BarrierSet::Other:
1473       // No post barriers
1474       break;
1475     default      :
1476       ShouldNotReachHere();
1477     }
1478 }
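
A note on the dispatch above: pre_barrier() and post_barrier() switch on the active barrier set's kind, and this change adds a ShenandoahBarrierSet case that reuses the G1 SATB pre-barrier when ShenandoahSATBBarrier is enabled and routes stored values through the storeval barrier. The standalone C++ sketch below only models that kind-based dispatch shape; the enum values and helper names are invented for illustration and are not HotSpot identifiers.

#include <cstdio>

// Invented stand-in for the barrier-set kind; not the HotSpot enum.
enum class BarrierKind { G1SATB, Shenandoah, CardTable, ModRef };

static void g1_post_barrier()    { std::puts("emit G1 post-barrier"); }
static void storeval_barrier()   { std::puts("emit Shenandoah storeval barrier"); }
static void card_table_barrier() { std::puts("emit card table post-barrier"); }

// Mirrors the shape of post_barrier() above: the collector's barrier set
// kind selects the barrier code emitted after an oop store.
static void emit_post_barrier(BarrierKind kind) {
  switch (kind) {
    case BarrierKind::G1SATB:     g1_post_barrier();    break;
    case BarrierKind::Shenandoah: storeval_barrier();   break;
    case BarrierKind::CardTable:  card_table_barrier(); break;
    case BarrierKind::ModRef:     /* no post barrier */ break;
  }
}

int main() {
  emit_post_barrier(BarrierKind::Shenandoah);
  emit_post_barrier(BarrierKind::CardTable);
  return 0;
}
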
1479 
1480 ////////////////////////////////////////////////////////////////////////
1481 #if INCLUDE_ALL_GCS
1482 
1483 void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1484                                                      bool do_load, bool patch, CodeEmitInfo* info) {
1485   // First we test whether marking is in progress.


1812       __ move(LIR_OprFact::oopConst(NULL), obj);
1813     }
1814     // Emit an explicit null check because the offset is too large.
1815     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1816     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1817     __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1818   }
1819 
1820   LIR_Opr reg = rlock_result(x, field_type);
1821   LIR_Address* address;
1822   if (needs_patching) {
1823     // we need to patch the offset in the instruction so don't allow
1824     // generate_address to try to be smart about emitting the -1.
1825     // Otherwise the patching code won't know how to find the
1826     // instruction to patch.
1827     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1828   } else {
1829     address = generate_address(object.result(), x->offset(), field_type);
1830   }
1831 
1832 #if INCLUDE_ALL_GCS
1833   if (UseShenandoahGC && (field_type == T_OBJECT || field_type == T_ARRAY)) {
1834     LIR_Opr tmp = new_register(T_OBJECT);
1835     LIR_Opr addr = ShenandoahBarrierSet::barrier_set()->bsc1()->resolve_address(this, address, field_type, needs_patching ? info : NULL);
1836     if (is_volatile) {
1837       volatile_field_load(addr->as_address_ptr(), tmp, info);
1838     } else {
1839       __ load(addr->as_address_ptr(), tmp, info);
1840     }
1841     if (is_volatile && os::is_MP()) {
1842       __ membar_acquire();
1843     }
1844     tmp = ShenandoahBarrierSet::barrier_set()->bsc1()->load_reference_barrier(this, tmp, addr);
1845     __ move(tmp, reg);
1846   } else
1847 #endif
1848   {
1849   if (is_volatile && !needs_patching) {
1850     volatile_field_load(address, reg, info);
1851   } else {
1852     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1853     __ load(address, reg, info, patch_code);
1854   }

1855   if (is_volatile && os::is_MP()) {
1856     __ membar_acquire();
1857   }
1858   }
1859 }
1860 
1861 
1862 //------------------------java.nio.Buffer.checkIndex------------------------
1863 
1864 // int java.nio.Buffer.checkIndex(int)
1865 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1866   // NOTE: by the time we are in checkIndex() we are guaranteed that
1867   // the buffer is non-null (because checkIndex is package-private and
1868   // only called from within other methods in the buffer).
1869   assert(x->number_of_arguments() == 2, "wrong type");
1870   LIRItem buf  (x->argument_at(0), this);
1871   LIRItem index(x->argument_at(1), this);
1872   buf.load_item();
1873   index.load_item();
1874 
1875   LIR_Opr result = rlock_result(x);
1876   if (GenerateRangeChecks) {
1877     CodeEmitInfo* info = state_for(x);
1878     CodeStub* stub = new RangeCheckStub(info, index.result(), true);


1955   }
1956 
1957   // emit array address setup early so it schedules better
1958   LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
1959 
1960   if (GenerateRangeChecks && needs_range_check) {
1961     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1962       __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
1963     } else if (use_length) {
1964       // TODO: use a (modified) version of array_range_check that does not require a
1965       //       constant length to be loaded to a register
1966       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1967       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
1968     } else {
1969       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1970       // The range check performs the null check, so clear it out for the load
1971       null_check_info = NULL;
1972     }
1973   }
1974 
1975   LIR_Opr result = rlock_result(x, x->elt_type());
1976 
1977 #if INCLUDE_ALL_GCS
1978   if (UseShenandoahGC && (x->elt_type() == T_OBJECT || x->elt_type() == T_ARRAY)) {
1979     LIR_Opr tmp = new_register(T_OBJECT);
1980     LIR_Opr addr = ShenandoahBarrierSet::barrier_set()->bsc1()->resolve_address(this, array_addr, x->elt_type(), NULL);
1981     __ move(addr->as_address_ptr(), tmp, null_check_info);
1982     tmp = ShenandoahBarrierSet::barrier_set()->bsc1()->load_reference_barrier(this, tmp, addr);
1983     __ move(tmp, result);
1984   } else
1985 #endif
1986   __ move(array_addr, result, null_check_info);
1987 
1988 }
1989 
1990 
1991 void LIRGenerator::do_NullCheck(NullCheck* x) {
1992   if (x->can_trap()) {
1993     LIRItem value(x->obj(), this);
1994     value.load_item();
1995     CodeEmitInfo* info = state_for(x);
1996     __ null_check(value.result(), info);
1997   }
1998 }
1999 
2000 
2001 void LIRGenerator::do_TypeCast(TypeCast* x) {
2002   LIRItem value(x->obj(), this);
2003   value.load_item();
2004   // the result is the same as from the node we are casting
2005   set_result(x, value.result());
2006 }
2007 


2256       index_op = tmp;
2257     }
2258   }
2259 
2260   LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2261 #endif // !GENERATE_ADDRESS_IS_PREFERRED
2262   __ move(value.result(), addr);
2263 }
2264 
2265 
2266 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2267   BasicType type = x->basic_type();
2268   LIRItem src(x->object(), this);
2269   LIRItem off(x->offset(), this);
2270 
2271   off.load_item();
2272   src.load_item();
2273 
2274   LIR_Opr value = rlock_result(x, x->basic_type());
2275 
2276 #if INCLUDE_ALL_GCS
2277   if (UseShenandoahGC && (type == T_OBJECT || type == T_ARRAY)) {
2278     LIR_Opr tmp = new_register(T_OBJECT);
2279     get_Object_unsafe(tmp, src.result(), off.result(), type, x->is_volatile());
2280     tmp = ShenandoahBarrierSet::barrier_set()->bsc1()->load_reference_barrier(this, tmp, LIR_OprFact::addressConst(0));
2281     __ move(tmp, value);
2282   } else
2283 #endif
2284   get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2285 
2286 #if INCLUDE_ALL_GCS
2287   // We might be reading the value of the referent field of a
2288   // Reference object in order to attach it back to the live
2289   // object graph. If G1 is enabled then we need to record
2290   // the value that is being returned in an SATB log buffer.
2291   //
2292   // We need to generate code similar to the following...
2293   //
2294   // if (offset == java_lang_ref_Reference::referent_offset) {
2295   //   if (src != NULL) {
2296   //     if (klass(src)->reference_type() != REF_NONE) {
2297   //       pre_barrier(..., value, ...);
2298   //     }
2299   //   }
2300   // }
2301 
2302   if ((UseShenandoahGC || UseG1GC) && type == T_OBJECT) {
2303     bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.
2304     bool gen_offset_check = true;    // Assume we need to generate the offset guard.
2305     bool gen_source_check = true;    // Assume we need to check the src object for null.
2306     bool gen_type_check = true;      // Assume we need to check the reference_type.
2307 
2308     if (off.is_constant()) {
2309       jlong off_con = (off.type()->is_int() ?
2310                         (jlong) off.get_jint_constant() :
2311                         off.get_jlong_constant());
2312 
2313 
2314       if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
2315         // The constant offset is something other than referent_offset.
2316         // We can skip generating/checking the remaining guards and
2317         // skip generation of the code stub.
2318         gen_pre_barrier = false;
2319       } else {
2320         // The constant offset is the same as referent_offset -
2321         // we do not need to generate a runtime offset check.
2322         gen_offset_check = false;

