  // Steps (a), (b): Walk past independent stores to find an exact match.
  if (prev_mem != nullptr && prev_mem != in(MemNode::Memory)) {
    // (c) See if we can fold up on the spot, but don't fold up here.
    // Fold-up might require truncation (for LoadB/LoadS/LoadUS) or
    // just return a prior value, which is done by Identity calls.
    if (can_see_stored_value(prev_mem, phase)) {
      // Make ready for step (d):
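      // (set_req_X, unlike plain set_req, also records the change with the
      //  GVN phase, so the bypassed memory input can be revisited and
      //  reclaimed if it becomes dead.)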
      set_req_X(MemNode::Memory, prev_mem, phase);
      return this;
    }
  }

  return progress ? this : nullptr;
}

// Helper to recognize certain Klass fields which are invariant across
// some group of array types (e.g., int[] or all T[] where T < Object).
const Type*
LoadNode::load_array_final_field(const TypeKlassPtr* tkls,
                                 ciKlass* klass) const {
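  // Note: with UseCompactObjectHeaders the klass's prototype mark word is a
  // per-klass constant (it embeds the compressed klass pointer), which is
  // what allows the _prototype_header load below to constant-fold.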
  if (UseCompactObjectHeaders) {
    if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) {
      // The field is Klass::_prototype_header. Return its (constant) value.
      assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header");
      return TypeX::make(klass->prototype_header());
    }
  }
  if (tkls->offset() == in_bytes(Klass::modifier_flags_offset())) {
    // The field is Klass::_modifier_flags. Return its (constant) value.
    // (Folds up the 2nd indirection in aClassConstant.getModifiers().)
    assert(this->Opcode() == Op_LoadI, "must load an int from _modifier_flags");
    return TypeInt::make(klass->modifier_flags());
  }
  if (tkls->offset() == in_bytes(Klass::access_flags_offset())) {
    // The field is Klass::_access_flags. Return its (constant) value.
    // (Folds up the 2nd indirection in Reflection.getClassAccessFlags(aClassConstant).)
    assert(this->Opcode() == Op_LoadI, "must load an int from _access_flags");
    return TypeInt::make(klass->access_flags());
  }
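  // (_layout_helper packs the object layout into a single int: roughly the
  //  instance size for ordinary objects, or a tagged descriptor with header
  //  size and log2 element size for arrays.)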
  if (tkls->offset() == in_bytes(Klass::layout_helper_offset())) {
    // The field is Klass::_layout_helper. Return its constant value if known.
    assert(this->Opcode() == Op_LoadI, "must load an int from _layout_helper");
    return TypeInt::make(klass->layout_helper());
  }

  // No match.
  return nullptr;
}

// ...
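        // The mirror is held behind an OopHandle, so the mirror read is the
        // second of two loads: LoadP(LoadP(AddP(klass, #java_mirror))).
        // Hence both this node and its address (the handle load) are LoadP.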
        assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
        assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
        return TypeInstPtr::make(klass->java_mirror());
      }
    }
  }

  const TypeKlassPtr* tkls = tp->isa_klassptr();
  if (tkls != nullptr) {
    if (tkls->is_loaded() && tkls->klass_is_exact()) {
      ciKlass* klass = tkls->exact_klass();
      // We are loading a field from a Klass metaobject whose identity
      // is known at compile time (the type is "exact" or "precise").
      // Check for fields we know are maintained as constants by the VM.
      if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
        // The field is Klass::_super_check_offset. Return its (constant) value.
        // (Folds up type checking code.)
        assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
        return TypeInt::make(klass->super_check_offset());
      }
      if (UseCompactObjectHeaders) {
        if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) {
          // The field is Klass::_prototype_header. Return its (constant) value.
          assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header");
          return TypeX::make(klass->prototype_header());
        }
      }
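      // Example: an offset below primary_supers_offset() makes the unsigned
      // subtraction below wrap around to a huge 'depth', which the
      // 'depth < primary_super_limit()' test then safely rejects.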
      // Compute the index into the primary_supers array.
      juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
      // Check for overflow; the unsigned compare also handles the negative case.
      if (depth < ciKlass::primary_super_limit()) {
        // The field is an element of Klass::_primary_supers. Return its (constant) value.
        // (Folds up type checking code.)
        assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
        ciKlass* ss = klass->super_of_depth(depth);
        return ss ? TypeKlassPtr::make(ss, Type::trust_interfaces) : TypePtr::NULL_PTR;
      }
      const Type* aift = load_array_final_field(tkls, klass);
      if (aift != nullptr) return aift;
    }

    // We can still check if we are loading from the primary_supers array at a
    // shallow enough depth. Even though the klass is not exact, entries less
    // than or equal to its super depth are correct.
    if (tkls->is_loaded()) {
      ciKlass* klass = nullptr;
      if (tkls->isa_instklassptr()) {
        // ...

    if (value != nullptr && value->is_Con()) {
      assert(value->bottom_type()->higher_equal(_type), "sanity");
      return value->bottom_type();
    }
  }

  bool is_vect = (_type->isa_vect() != nullptr);
  if (is_instance && !is_vect) {
    // If we have an instance type and our memory input is the
    // program's initial memory state, there is no matching store,
    // so just return a zero of the appropriate type -
    // except if it is vectorized - then we have no zero constant.
    Node* mem = in(MemNode::Memory);
    if (mem->is_Parm() && mem->in(0)->is_Start()) {
      assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
      return Type::get_zero_type(_type->basic_type());
    }
  }
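  // A load of the mark word from a just-allocated object folds to the mark
  // prototype. Note: with UseCompactObjectHeaders the mark word also carries
  // the compressed klass pointer, so this klass-independent constant would
  // be wrong there; that configuration is excluded below.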
  Node* alloc = is_new_object_mark_load(phase);
  if (!UseCompactObjectHeaders && alloc != nullptr) {
    return TypeX::make(markWord::prototype().value());
  }

  return _type;
}

//------------------------------match_edge-------------------------------------
// Do we Match on this edge index or not? Match only the address.
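// (Only the address edge may be folded into the instruction during matching,
//  e.g. into an addressing mode; the memory and control inputs stay explicit.)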
uint LoadNode::match_edge(uint idx) const {
  return idx == MemNode::Address;
}

//--------------------------LoadBNode::Ideal--------------------------------------
//
// If the previous store is to the same address as this load,
// and the value stored was larger than a byte, replace this load
// with the value stored truncated to a byte. If no truncation is
// needed, the replacement is done in LoadNode::Identity().
//
Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {