src/hotspot/share/opto/memnode.cpp

  32 #include "opto/arraycopynode.hpp"
  33 #include "opto/cfgnode.hpp"
  34 #include "opto/compile.hpp"
  35 #include "opto/connode.hpp"
  36 #include "opto/convertnode.hpp"
  37 #include "opto/loopnode.hpp"
  38 #include "opto/machnode.hpp"
  39 #include "opto/matcher.hpp"
  40 #include "opto/memnode.hpp"
  41 #include "opto/mulnode.hpp"
  42 #include "opto/narrowptrnode.hpp"
  43 #include "opto/phaseX.hpp"
  44 #include "opto/regmask.hpp"
  45 #include "utilities/align.hpp"
  46 #include "utilities/copy.hpp"
  47 #include "utilities/macros.hpp"
  48 #include "utilities/vmError.hpp"
  49 #if INCLUDE_ZGC
  50 #include "gc/z/c2/zBarrierSetC2.hpp"
  51 #endif
  52 
  53 // Portions of code courtesy of Clifford Click
  54 
  55 // Optimization - Graph Style
  56 
  57 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st);
  58 
  59 //=============================================================================
  60 uint MemNode::size_of() const { return sizeof(*this); }
  61 
  62 const TypePtr *MemNode::adr_type() const {
  63   Node* adr = in(Address);
  64   if (adr == NULL)  return NULL; // node is dead
  65   const TypePtr* cross_check = NULL;
  66   DEBUG_ONLY(cross_check = _adr_type);
  67   return calculate_adr_type(adr->bottom_type(), cross_check);
  68 }
  69 
  70 bool MemNode::check_if_adr_maybe_raw(Node* adr) {
  71   if (adr != NULL) {


 905   Node* ld_adr = in(MemNode::Address);
 906   intptr_t ld_off = 0;
 907   AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
 908   Node* ac = find_previous_arraycopy(phase, ld_alloc, st, true);
 909   if (ac != NULL) {
 910     assert(ac->is_ArrayCopy(), "what kind of node can this be?");
 911 
 912     Node* mem = ac->in(TypeFunc::Memory);
 913     Node* ctl = ac->in(0);
 914     Node* src = ac->in(ArrayCopyNode::Src);
 915 
 916     if (!ac->as_ArrayCopy()->is_clonebasic() && !phase->type(src)->isa_aryptr()) {
 917       return NULL;
 918     }
 919 
 920     LoadNode* ld = clone()->as_Load();
 921     Node* addp = in(MemNode::Address)->clone();
 922     if (ac->as_ArrayCopy()->is_clonebasic()) {
 923       assert(ld_alloc != NULL, "need an alloc");
 924       assert(addp->is_AddP(), "address must be addp");
 925       assert(addp->in(AddPNode::Base) == ac->in(ArrayCopyNode::Dest)->in(AddPNode::Base), "strange pattern");
 926       assert(addp->in(AddPNode::Address) == ac->in(ArrayCopyNode::Dest)->in(AddPNode::Address), "strange pattern");
 927       addp->set_req(AddPNode::Base, src->in(AddPNode::Base));
 928       addp->set_req(AddPNode::Address, src->in(AddPNode::Address));
 929     } else {
 930       assert(ac->as_ArrayCopy()->is_arraycopy_validated() ||
 931              ac->as_ArrayCopy()->is_copyof_validated() ||
 932              ac->as_ArrayCopy()->is_copyofrange_validated(), "only supported cases");
 933       assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
 934       addp->set_req(AddPNode::Base, src);
 935       addp->set_req(AddPNode::Address, src);
 936 
 937       const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
 938       BasicType ary_elem  = ary_t->klass()->as_array_klass()->element_type()->basic_type();
 939       uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
 940       uint shift  = exact_log2(type2aelembytes(ary_elem));
 941 
 942       Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
 943 #ifdef _LP64
 944       diff = phase->transform(new ConvI2LNode(diff));
 945 #endif
 946       diff = phase->transform(new LShiftXNode(diff, phase->intcon(shift)));
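For reference, a standalone sketch (plain C++, not HotSpot code) of the arithmetic the SubI/ConvI2L/LShiftX chain above builds: the byte distance between the copy's source and destination positions, scaled by the element size, which is presumably folded into the cloned AddP's offset in the lines that follow this hunk. The concrete values in main() are hypothetical.

#include <cassert>
#include <cstdint>

// Sketch only: redirect a load that read through the arraycopy destination so it reads the source.
static int64_t redirected_offset(int64_t dest_off, int32_t src_pos, int32_t dest_pos, unsigned shift) {
  int64_t diff = static_cast<int64_t>(src_pos) - dest_pos;  // SubI, then ConvI2L on _LP64
  return dest_off + (diff << shift);                        // LShiftX by log2(element size)
}

int main() {
  // Hypothetical int[] copy (shift == 2) with a 16-byte header, SrcPos == 4, DestPos == 1:
  // a load of dest[2] at byte offset 16 + 2*4 == 24 must read src[5] at 16 + 5*4 == 36.
  assert(redirected_offset(24, 4, 1, 2) == 36);
  return 0;
}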


1062     }
1063 
1064     // A load from an initialization barrier can match a captured store.
1065     if (st->is_Proj() && st->in(0)->is_Initialize()) {
1066       InitializeNode* init = st->in(0)->as_Initialize();
1067       AllocateNode* alloc = init->allocation();
1068       if ((alloc != NULL) && (alloc == ld_alloc)) {
1069         // examine a captured store value
1070         st = init->find_captured_store(ld_off, memory_size(), phase);
1071         if (st != NULL) {
1072           continue;             // take one more trip around
1073         }
1074       }
1075     }
1076 
1077     // Load boxed value from result of valueOf() call is input parameter.
1078     if (this->is_Load() && ld_adr->is_AddP() &&
1079         (tp != NULL) && tp->is_ptr_to_boxed_value()) {
1080       intptr_t ignore = 0;
1081       Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore);
1082       if (base != NULL && base->is_Proj() &&
1083           base->as_Proj()->_con == TypeFunc::Parms &&
1084           base->in(0)->is_CallStaticJava() &&
1085           base->in(0)->as_CallStaticJava()->is_boxing_method()) {
1086         return base->in(0)->in(TypeFunc::Parms);
1087       }
1088     }
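The pattern folded here, written out as an illustrative sketch (node names are invented; only the Proj/CallStaticJava/Parms relationships come from the code above):

  // call : CallStaticJava, e.g. Integer.valueOf(n)     -- is_boxing_method()
  // box  : Proj(call, TypeFunc::Parms)                 -- the box returned by the call
  // adr  : AddP(box, box, offset of the value field)   -- tp->is_ptr_to_boxed_value()
  // load : this LoadNode, with address adr
  //
  // Returning base->in(0)->in(TypeFunc::Parms) hands back n itself, so the load of the
  // box's value field sees the argument that was boxed.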
1089 
1090     break;
1091   }
1092 
1093   return NULL;
1094 }
1095 
1096 //----------------------is_instance_field_load_with_local_phi------------------
1097 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
1098   if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
1099       in(Address)->is_AddP() ) {
1100     const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
1101     // Only instances and boxed values.


1111 }
1112 
1113 //------------------------------Identity---------------------------------------
1114 // Loads are identity if previous store is to same address
1115 Node* LoadNode::Identity(PhaseGVN* phase) {
1116   // If the previous store-maker is the right kind of Store, and the store is
1117   // to the same address, then we are equal to the value stored.
1118   Node* mem = in(Memory);
1119   Node* value = can_see_stored_value(mem, phase);
1120   if( value ) {
1121     // byte, short & char stores truncate naturally.
1122     // A load has to load the truncated value which requires
1123     // some sort of masking operation and that requires an
1124     // Ideal call instead of an Identity call.
1125     if (memory_size() < BytesPerInt) {
1126       // If the input to the store does not fit with the load's result type,
1127       // it must be truncated via an Ideal call.
1128       if (!phase->type(value)->higher_equal(phase->type(this)))
1129         return this;
1130     }
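A minimal standalone illustration (plain C++, not HotSpot code) of why the narrow-load case above must fall back to Ideal instead of returning the stored value directly:

#include <cassert>
#include <cstdint>

// Models StoreB followed by LoadB: the store truncates, and the load must observe the
// truncated, sign-extended value, which needs an explicit masking step unless the stored
// value's type already fits the load's type.
static int32_t byte_load_after_byte_store(int32_t stored) {
  return static_cast<int8_t>(stored);
}

int main() {
  assert(byte_load_after_byte_store(0x1FF) == -1);  // not 0x1FF: truncated and sign-extended
  assert(byte_load_after_byte_store(42) == 42);     // already fits in a byte: identity is safe
  return 0;
}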
1131     // (This works even when value is a Con, but LoadNode::Value
1132     // usually runs first, producing the singleton type of the Con.)
1133     return value;
1134   }
1135 
1136   // Search for an existing data phi which was generated before for the same
1137   // instance's field to avoid infinite generation of phis in a loop.
1138   Node *region = mem->in(0);
1139   if (is_instance_field_load_with_local_phi(region)) {
1140     const TypeOopPtr *addr_t = in(Address)->bottom_type()->isa_oopptr();
1141     int this_index  = phase->C->get_alias_index(addr_t);
1142     int this_offset = addr_t->offset();
1143     int this_iid    = addr_t->instance_id();
1144     if (!addr_t->is_known_instance() &&
1145          addr_t->is_ptr_to_boxed_value()) {
1146       // Use _idx of address base (could be Phi node) for boxed values.
1147       intptr_t   ignore = 0;
1148       Node*      base = AddPNode::Ideal_base_and_offset(in(Address), phase, ignore);
1149       if (base == NULL) {
1150         return this;
1151       }
1152       this_iid = base->_idx;


1214   if ((base == NULL) || base->is_Phi()) {
1215     // Push the loads from the phi that comes from valueOf up
1216     // through it to allow elimination of the loads and the recovery
1217     // of the original value. It is done in split_through_phi().
1218     return NULL;
1219   } else if (base->is_Load() ||
1220              (base->is_DecodeN() && base->in(1)->is_Load())) {
1221     // Eliminate the load of boxed value for integer types from the cache
1222     // array by deriving the value from the index into the array.
1223     // Capture the offset of the load and then reverse the computation.
1224 
1225     // Get LoadN node which loads a boxing object from 'cache' array.
1226     if (base->is_DecodeN()) {
1227       base = base->in(1);
1228     }
1229     if (!base->in(Address)->is_AddP()) {
1230       return NULL; // Complex address
1231     }
1232     AddPNode* address = base->in(Address)->as_AddP();
1233     Node* cache_base = address->in(AddPNode::Base);
1234     if ((cache_base != NULL) && cache_base->is_DecodeN()) {
1235       // Get ConP node which is static 'cache' field.
1236       cache_base = cache_base->in(1);
1237     }
1238     if ((cache_base != NULL) && cache_base->is_Con()) {
1239       const TypeAryPtr* base_type = cache_base->bottom_type()->isa_aryptr();
1240       if ((base_type != NULL) && base_type->is_autobox_cache()) {
1241         Node* elements[4];
1242         int shift = exact_log2(type2aelembytes(T_OBJECT));
1243         int count = address->unpack_offsets(elements, ARRAY_SIZE(elements));
1244         if (count > 0 && elements[0]->is_Con() &&
1245             (count == 1 ||
1246              (count == 2 && elements[1]->Opcode() == Op_LShiftX &&
1247                             elements[1]->in(2) == phase->intcon(shift)))) {
1248           ciObjArray* array = base_type->const_oop()->as_obj_array();
1249           // Fetch the box object cache[0] at the base of the array and get its value
1250           ciInstance* box = array->obj_at(0)->as_instance();
1251           ciInstanceKlass* ik = box->klass()->as_instance_klass();
1252           assert(ik->is_box_klass(), "sanity");
1253           assert(ik->nof_nonstatic_fields() == 1, "change following code");
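A standalone sketch (plain C++, not HotSpot code) of the reversal described above, assuming the usual valueOf cache layout in which consecutive array elements box consecutive values; the constants in main() are hypothetical.

#include <cassert>

// If cache[0] boxes v0 and element references are element_size bytes apart, the element at
// byte offset off (measured from the first element) boxes v0 + off / element_size, so the
// loaded value can be recomputed from the index instead of loaded from the box object.
static int cached_value_from_offset(int v0, long off_from_first_element, int element_size) {
  return v0 + static_cast<int>(off_from_first_element / element_size);
}

int main() {
  // Hypothetical Integer-style cache: cache[0] boxes -128, compressed 4-byte elements.
  // The element at byte offset 520 is cache[130], which boxes -128 + 130 == 2.
  assert(cached_value_from_offset(-128, 520, 4) == 2);
  return 0;
}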


1678   if (t1 == Type::TOP)  return Type::TOP;
1679   Node* adr = in(MemNode::Address);
1680   const TypePtr* tp = phase->type(adr)->isa_ptr();
1681   if (tp == NULL || tp->empty())  return Type::TOP;
1682   int off = tp->offset();
1683   assert(off != Type::OffsetTop, "case covered by TypePtr::empty");
1684   Compile* C = phase->C;
1685 
1686   // Try to guess loaded type from pointer type
1687   if (tp->isa_aryptr()) {
1688     const TypeAryPtr* ary = tp->is_aryptr();
1689     const Type* t = ary->elem();
1690 
1691     // Determine whether the reference is beyond the header or not, by comparing
1692     // the offset against the offset of the start of the array's data.
1693     // Different array types begin at slightly different offsets (12 vs. 16).
1694     // We choose T_BYTE as an example base type that is least restrictive
1695     // as to alignment, which will therefore produce the smallest
1696     // possible base offset.
1697     const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1698     const bool off_beyond_header = ((uint)off >= (uint)min_base_off);

1699 
1700     // Try to constant-fold a stable array element.
1701     if (FoldStableValues && !is_mismatched_access() && ary->is_stable()) {
1702       // Make sure the reference is not into the header and the offset is constant
1703       ciObject* aobj = ary->const_oop();
1704       if (aobj != NULL && off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) {
1705         int stable_dimension = (ary->stable_dimension() > 0 ? ary->stable_dimension() - 1 : 0);
1706         const Type* con_type = Type::make_constant_from_array_element(aobj->as_array(), off,
1707                                                                       stable_dimension,
1708                                                                       memory_type(), is_unsigned());
1709         if (con_type != NULL) {
1710           return con_type;
1711         }
1712       }
1713     }
1714 
1715     // Don't do this for integer types. There is only potential profit if
1716     // the element type t is lower than _type; that is, for int types, if _type is
1717     // more restrictive than t.  This only happens here if one is short and the other
1718     // char (both 16 bits), and in those cases we've made an intentional decision
1719     // to use one kind of load over the other. See AndINode::Ideal and 4965907.
1720     // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
1721     //
1722     // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
1723     // where the _gvn.type of the AddP is wider than 8.  This occurs when an earlier


1753               // It could be narrow oop
1754               assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity");
1755             }
1756           }
1757         }
1758 #endif
1759         return jt;
1760       }
1761     }
1762   } else if (tp->base() == Type::InstPtr) {
1763     assert( off != Type::OffsetBot ||
1764             // arrays can be cast to Objects
1765             tp->is_oopptr()->klass()->is_java_lang_Object() ||
1766             // unsafe field access may not have a constant offset
1767             C->has_unsafe_access(),
1768             "Field accesses must be precise" );
1769     // For oop loads, we expect the _type to be precise.
1770 
1771     // Optimize loads from constant fields.
1772     const TypeInstPtr* tinst = tp->is_instptr();
1773     ciObject* const_oop = tinst->const_oop();
1774     if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != NULL && const_oop->is_instance()) {
1775       const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), memory_type());
1776       if (con_type != NULL) {
1777         return con_type;
1778       }
1779     }
1780   } else if (tp->base() == Type::KlassPtr) {
1781     assert( off != Type::OffsetBot ||
1782             // arrays can be cast to Objects
1783             tp->is_klassptr()->klass()->is_java_lang_Object() ||
1784             // also allow array-loading from the primary supertype
1785             // array during subtype checks
1786             Opcode() == Op_LoadKlass,
1787             "Field accesses must be precise" );
1788     // For klass/static loads, we expect the _type to be precise
1789   } else if (tp->base() == Type::RawPtr && adr->is_Load() && off == 0) {
1790     /* With mirrors being an indirect in the Klass*
1791      * the VM is now using two loads. LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset))
1792      * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
1793      *

The hunks below repeat the same regions from the patched copy of memnode.cpp, with the Shenandoah changes applied; the hunks above are from the original file.

  32 #include "opto/arraycopynode.hpp"
  33 #include "opto/cfgnode.hpp"
  34 #include "opto/compile.hpp"
  35 #include "opto/connode.hpp"
  36 #include "opto/convertnode.hpp"
  37 #include "opto/loopnode.hpp"
  38 #include "opto/machnode.hpp"
  39 #include "opto/matcher.hpp"
  40 #include "opto/memnode.hpp"
  41 #include "opto/mulnode.hpp"
  42 #include "opto/narrowptrnode.hpp"
  43 #include "opto/phaseX.hpp"
  44 #include "opto/regmask.hpp"
  45 #include "utilities/align.hpp"
  46 #include "utilities/copy.hpp"
  47 #include "utilities/macros.hpp"
  48 #include "utilities/vmError.hpp"
  49 #if INCLUDE_ZGC
  50 #include "gc/z/c2/zBarrierSetC2.hpp"
  51 #endif
  52 #if INCLUDE_SHENANDOAHGC
  53 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  54 #endif
  55 
  56 // Portions of code courtesy of Clifford Click
  57 
  58 // Optimization - Graph Style
  59 
  60 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st);
  61 
  62 //=============================================================================
  63 uint MemNode::size_of() const { return sizeof(*this); }
  64 
  65 const TypePtr *MemNode::adr_type() const {
  66   Node* adr = in(Address);
  67   if (adr == NULL)  return NULL; // node is dead
  68   const TypePtr* cross_check = NULL;
  69   DEBUG_ONLY(cross_check = _adr_type);
  70   return calculate_adr_type(adr->bottom_type(), cross_check);
  71 }
  72 
  73 bool MemNode::check_if_adr_maybe_raw(Node* adr) {
  74   if (adr != NULL) {


 908   Node* ld_adr = in(MemNode::Address);
 909   intptr_t ld_off = 0;
 910   AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
 911   Node* ac = find_previous_arraycopy(phase, ld_alloc, st, true);
 912   if (ac != NULL) {
 913     assert(ac->is_ArrayCopy(), "what kind of node can this be?");
 914 
 915     Node* mem = ac->in(TypeFunc::Memory);
 916     Node* ctl = ac->in(0);
 917     Node* src = ac->in(ArrayCopyNode::Src);
 918 
 919     if (!ac->as_ArrayCopy()->is_clonebasic() && !phase->type(src)->isa_aryptr()) {
 920       return NULL;
 921     }
 922 
 923     LoadNode* ld = clone()->as_Load();
 924     Node* addp = in(MemNode::Address)->clone();
 925     if (ac->as_ArrayCopy()->is_clonebasic()) {
 926       assert(ld_alloc != NULL, "need an alloc");
 927       assert(addp->is_AddP(), "address must be addp");
 928       assert(ac->in(ArrayCopyNode::Dest)->is_AddP(), "dest must be an address");
 929 #if INCLUDE_SHENANDOAHGC
 930       assert(ShenandoahBarrierNode::skip_through_barrier(addp->in(AddPNode::Base)) == ShenandoahBarrierNode::skip_through_barrier(ac->in(ArrayCopyNode::Dest)->in(AddPNode::Base)), "strange pattern");
 931       assert(ShenandoahBarrierNode::skip_through_barrier(addp->in(AddPNode::Address)) == ShenandoahBarrierNode::skip_through_barrier(ac->in(ArrayCopyNode::Dest)->in(AddPNode::Address)), "strange pattern");
 932 #endif
 933       addp->set_req(AddPNode::Base, src->in(AddPNode::Base));
 934       addp->set_req(AddPNode::Address, src->in(AddPNode::Address));
 935     } else {
 936       assert(ac->as_ArrayCopy()->is_arraycopy_validated() ||
 937              ac->as_ArrayCopy()->is_copyof_validated() ||
 938              ac->as_ArrayCopy()->is_copyofrange_validated(), "only supported cases");
 939       assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
 940       addp->set_req(AddPNode::Base, src);
 941       addp->set_req(AddPNode::Address, src);
 942 
 943       const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
 944       BasicType ary_elem  = ary_t->klass()->as_array_klass()->element_type()->basic_type();
 945       uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
 946       uint shift  = exact_log2(type2aelembytes(ary_elem));
 947 
 948       Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
 949 #ifdef _LP64
 950       diff = phase->transform(new ConvI2LNode(diff));
 951 #endif
 952       diff = phase->transform(new LShiftXNode(diff, phase->intcon(shift)));


1068     }
1069 
1070     // A load from an initialization barrier can match a captured store.
1071     if (st->is_Proj() && st->in(0)->is_Initialize()) {
1072       InitializeNode* init = st->in(0)->as_Initialize();
1073       AllocateNode* alloc = init->allocation();
1074       if ((alloc != NULL) && (alloc == ld_alloc)) {
1075         // examine a captured store value
1076         st = init->find_captured_store(ld_off, memory_size(), phase);
1077         if (st != NULL) {
1078           continue;             // take one more trip around
1079         }
1080       }
1081     }
1082 
1083     // Load boxed value from result of valueOf() call is input parameter.
1084     if (this->is_Load() && ld_adr->is_AddP() &&
1085         (tp != NULL) && tp->is_ptr_to_boxed_value()) {
1086       intptr_t ignore = 0;
1087       Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore);
1088 #if INCLUDE_SHENANDOAHGC
1089       base = ShenandoahBarrierNode::skip_through_barrier(base);
1090 #endif
1091       if (base != NULL && base->is_Proj() &&
1092           base->as_Proj()->_con == TypeFunc::Parms &&
1093           base->in(0)->is_CallStaticJava() &&
1094           base->in(0)->as_CallStaticJava()->is_boxing_method()) {
1095         return base->in(0)->in(TypeFunc::Parms);
1096       }
1097     }
1098 
1099     break;
1100   }
1101 
1102   return NULL;
1103 }
1104 
1105 //----------------------is_instance_field_load_with_local_phi------------------
1106 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
1107   if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
1108       in(Address)->is_AddP() ) {
1109     const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
1110     // Only instances and boxed values.


1120 }
1121 
1122 //------------------------------Identity---------------------------------------
1123 // Loads are identity if previous store is to same address
1124 Node* LoadNode::Identity(PhaseGVN* phase) {
1125   // If the previous store-maker is the right kind of Store, and the store is
1126   // to the same address, then we are equal to the value stored.
1127   Node* mem = in(Memory);
1128   Node* value = can_see_stored_value(mem, phase);
1129   if( value ) {
1130     // byte, short & char stores truncate naturally.
1131     // A load has to load the truncated value which requires
1132     // some sort of masking operation and that requires an
1133     // Ideal call instead of an Identity call.
1134     if (memory_size() < BytesPerInt) {
1135       // If the input to the store does not fit with the load's result type,
1136       // it must be truncated via an Ideal call.
1137       if (!phase->type(value)->higher_equal(phase->type(this)))
1138         return this;
1139     }
1140 
1141 #if INCLUDE_SHENANDOAHGC
1142     PhaseIterGVN* igvn = phase->is_IterGVN();
1143     if (UseShenandoahGC &&
1144         igvn != NULL &&
1145         value->is_Phi() &&
1146         value->req() > 2 &&
1147         value->in(1) != NULL &&
1148         value->in(1)->is_ShenandoahBarrier()) {
1149       if (igvn->_worklist.member(value) ||
1150           igvn->_worklist.member(value->in(0)) ||
1151           (value->in(0)->in(1) != NULL &&
1152            value->in(0)->in(1)->is_IfProj() &&
1153            (igvn->_worklist.member(value->in(0)->in(1)) ||
1154             (value->in(0)->in(1)->in(0) != NULL &&
1155              igvn->_worklist.member(value->in(0)->in(1)->in(0)))))) {
1156         igvn->_worklist.push(this);
1157         return this;
1158       }
1159     }
1160     // (This works even when value is a Con, but LoadNode::Value
1161     // usually runs first, producing the singleton type of the Con.)
1162     if (UseShenandoahGC) {
1163       Node* value_no_barrier = ShenandoahBarrierNode::skip_through_barrier(value->Opcode() == Op_EncodeP ? value->in(1) : value);
1164       if (value->Opcode() == Op_EncodeP) {
1165         if (value_no_barrier != value->in(1)) {
1166           Node* encode = value->clone();
1167           encode->set_req(1, value_no_barrier);
1168           encode = phase->transform(encode);
1169           return encode;
1170         }
1171       } else {
1172         return value_no_barrier;
1173       }
1174     }
1175 #endif
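A rough summary of the two Shenandoah-specific blocks added above, as read from the hunk: the first defers the identity decision, re-queuing this load on the IGVN worklist, while the Phi feeding it has a ShenandoahBarrier as its first data input and the Phi, its Region, or the controlling IfProj/If is still pending on that worklist; the second strips the barrier from the visible value:

  // value == EncodeP(ShenandoahBarrier(x))   ==>  a cloned EncodeP over x is transformed and returned
  // value not an EncodeP                     ==>  skip_through_barrier(value) is returned
  //                                               (value itself when there is no barrier to skip)
  // value == EncodeP of a non-barrier input  ==>  falls through to 'return value;' below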
1176 
1177     return value;
1178   }
1179 
1180   // Search for an existing data phi which was generated before for the same
1181   // instance's field to avoid infinite generation of phis in a loop.
1182   Node *region = mem->in(0);
1183   if (is_instance_field_load_with_local_phi(region)) {
1184     const TypeOopPtr *addr_t = in(Address)->bottom_type()->isa_oopptr();
1185     int this_index  = phase->C->get_alias_index(addr_t);
1186     int this_offset = addr_t->offset();
1187     int this_iid    = addr_t->instance_id();
1188     if (!addr_t->is_known_instance() &&
1189          addr_t->is_ptr_to_boxed_value()) {
1190       // Use _idx of address base (could be Phi node) for boxed values.
1191       intptr_t   ignore = 0;
1192       Node*      base = AddPNode::Ideal_base_and_offset(in(Address), phase, ignore);
1193       if (base == NULL) {
1194         return this;
1195       }
1196       this_iid = base->_idx;


1258   if ((base == NULL) || base->is_Phi()) {
1259     // Push the loads from the phi that comes from valueOf up
1260     // through it to allow elimination of the loads and the recovery
1261     // of the original value. It is done in split_through_phi().
1262     return NULL;
1263   } else if (base->is_Load() ||
1264              (base->is_DecodeN() && base->in(1)->is_Load())) {
1265     // Eliminate the load of boxed value for integer types from the cache
1266     // array by deriving the value from the index into the array.
1267     // Capture the offset of the load and then reverse the computation.
1268 
1269     // Get LoadN node which loads a boxing object from 'cache' array.
1270     if (base->is_DecodeN()) {
1271       base = base->in(1);
1272     }
1273     if (!base->in(Address)->is_AddP()) {
1274       return NULL; // Complex address
1275     }
1276     AddPNode* address = base->in(Address)->as_AddP();
1277     Node* cache_base = address->in(AddPNode::Base);
1278 #if INCLUDE_SHENANDOAHGC
1279     cache_base = ShenandoahBarrierNode::skip_through_barrier(cache_base);
1280 #endif
1281     if ((cache_base != NULL) && cache_base->is_DecodeN()) {
1282       // Get ConP node which is static 'cache' field.
1283       cache_base = cache_base->in(1);
1284     }
1285     if ((cache_base != NULL) && cache_base->is_Con()) {
1286       const TypeAryPtr* base_type = cache_base->bottom_type()->isa_aryptr();
1287       if ((base_type != NULL) && base_type->is_autobox_cache()) {
1288         Node* elements[4];
1289         int shift = exact_log2(type2aelembytes(T_OBJECT));
1290         int count = address->unpack_offsets(elements, ARRAY_SIZE(elements));
1291         if (count > 0 && elements[0]->is_Con() &&
1292             (count == 1 ||
1293              (count == 2 && elements[1]->Opcode() == Op_LShiftX &&
1294                             elements[1]->in(2) == phase->intcon(shift)))) {
1295           ciObjArray* array = base_type->const_oop()->as_obj_array();
1296           // Fetch the box object cache[0] at the base of the array and get its value
1297           ciInstance* box = array->obj_at(0)->as_instance();
1298           ciInstanceKlass* ik = box->klass()->as_instance_klass();
1299           assert(ik->is_box_klass(), "sanity");
1300           assert(ik->nof_nonstatic_fields() == 1, "change following code");


1725   if (t1 == Type::TOP)  return Type::TOP;
1726   Node* adr = in(MemNode::Address);
1727   const TypePtr* tp = phase->type(adr)->isa_ptr();
1728   if (tp == NULL || tp->empty())  return Type::TOP;
1729   int off = tp->offset();
1730   assert(off != Type::OffsetTop, "case covered by TypePtr::empty");
1731   Compile* C = phase->C;
1732 
1733   // Try to guess loaded type from pointer type
1734   if (tp->isa_aryptr()) {
1735     const TypeAryPtr* ary = tp->is_aryptr();
1736     const Type* t = ary->elem();
1737 
1738     // Determine whether the reference is beyond the header or not, by comparing
1739     // the offset against the offset of the start of the array's data.
1740     // Different array types begin at slightly different offsets (12 vs. 16).
1741     // We choose T_BYTE as an example base type that is least restrictive
1742     // as to alignment, which will therefore produce the smallest
1743     // possible base offset.
1744     const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1745     const bool off_beyond_header = SHENANDOAHGC_ONLY((off != ShenandoahBrooksPointer::byte_offset() || !UseShenandoahGC) &&)
1746                                     ((uint)off >= (uint)min_base_off);
1747 
1748     // Try to constant-fold a stable array element.
1749     if (FoldStableValues && !is_mismatched_access() && ary->is_stable()) {
1750       // Make sure the reference is not into the header and the offset is constant
1751       ciObject* aobj = NULL;
1752 #if INCLUDE_SHENANDOAHGC
1753       if (UseShenandoahGC && adr->is_AddP() && !adr->in(AddPNode::Base)->is_top()) {
1754         Node* base = ShenandoahBarrierNode::skip_through_barrier(adr->in(AddPNode::Base));
1755         if (!base->is_top()) {
1756           aobj = phase->type(base)->is_aryptr()->const_oop();
1757         }
1758       } else
1759 #endif
1760       {
1761         aobj = ary->const_oop();
1762       }
1763       if (aobj != NULL && off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) {
1764         int stable_dimension = (ary->stable_dimension() > 0 ? ary->stable_dimension() - 1 : 0);
1765         const Type* con_type = Type::make_constant_from_array_element(aobj->as_array(), off,
1766                                                                       stable_dimension,
1767                                                                       memory_type(), is_unsigned());
1768         if (con_type != NULL) {
1769           return con_type;
1770         }
1771       }
1772     }
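Two notes on the Shenandoah changes in this hunk, as far as they can be read from the code: the off_beyond_header computation now also excludes an access at ShenandoahBrooksPointer::byte_offset(), i.e. the forwarding-pointer word Shenandoah keeps in front of the object (a negative offset which the unsigned comparison against min_base_off would otherwise classify as lying past the header); and with Shenandoah enabled the constant array oop is looked up through the barrier on the address base rather than taken from the address type, since the AddP's base may be a barrier node:

  // default:     aobj = ary->const_oop();                                                        // from the address type
  // Shenandoah:  aobj = phase->type(skip_through_barrier(adr->in(AddPNode::Base)))->const_oop()  // through the barrier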
1773 
1774     // Don't do this for integer types. There is only potential profit if
1775     // the element type t is lower than _type; that is, for int types, if _type is
1776     // more restrictive than t.  This only happens here if one is short and the other
1777     // char (both 16 bits), and in those cases we've made an intentional decision
1778     // to use one kind of load over the other. See AndINode::Ideal and 4965907.
1779     // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
1780     //
1781     // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
1782     // where the _gvn.type of the AddP is wider than 8.  This occurs when an earlier


1812               // It could be narrow oop
1813               assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity");
1814             }
1815           }
1816         }
1817 #endif
1818         return jt;
1819       }
1820     }
1821   } else if (tp->base() == Type::InstPtr) {
1822     assert( off != Type::OffsetBot ||
1823             // arrays can be cast to Objects
1824             tp->is_oopptr()->klass()->is_java_lang_Object() ||
1825             // unsafe field access may not have a constant offset
1826             C->has_unsafe_access(),
1827             "Field accesses must be precise" );
1828     // For oop loads, we expect the _type to be precise.
1829 
1830     // Optimize loads from constant fields.
1831     const TypeInstPtr* tinst = tp->is_instptr();
1832     ciObject* const_oop = NULL;
1833 #if INCLUDE_SHENANDOAHGC
1834     if (UseShenandoahGC && adr->is_AddP() && !adr->in(AddPNode::Base)->is_top()) {
1835       Node* base = ShenandoahBarrierNode::skip_through_barrier(adr->in(AddPNode::Base));
1836       if (!base->is_top()) {
1837         const TypePtr* base_t = phase->type(base)->is_ptr();
1838         if (base_t != TypePtr::NULL_PTR) {
1839           const_oop = base_t->is_instptr()->const_oop();
1840         }
1841       }
1842     } else
1843 #endif
1844     {
1845       const_oop = tinst->const_oop();
1846     }
1847     if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != NULL && const_oop->is_instance()) {
1848       const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), memory_type());
1849       if (con_type != NULL) {
1850         return con_type;
1851       }
1852     }
1853   } else if (tp->base() == Type::KlassPtr) {
1854     assert( off != Type::OffsetBot ||
1855             // arrays can be cast to Objects
1856             tp->is_klassptr()->klass()->is_java_lang_Object() ||
1857             // also allow array-loading from the primary supertype
1858             // array during subtype checks
1859             Opcode() == Op_LoadKlass,
1860             "Field accesses must be precise" );
1861     // For klass/static loads, we expect the _type to be precise
1862   } else if (tp->base() == Type::RawPtr && adr->is_Load() && off == 0) {
1863     /* With mirrors being an indirect in the Klass*
1864      * the VM is now using two loads. LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset))
1865      * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
1866      *

