  // Steps (a), (b): Walk past independent stores to find an exact match.
  if (prev_mem != nullptr && prev_mem != in(MemNode::Memory)) {
    // (c) See if we can fold up on the spot, but don't fold up here.
    // Fold-up might require truncation (for LoadB/LoadS/LoadUS) or
    // just return a prior value, which is done by Identity calls.
    if (can_see_stored_value(prev_mem, phase)) {
      // Make ready for step (d):
      set_req_X(MemNode::Memory, prev_mem, phase);
      return this;
    }
  }

  return progress ? this : nullptr;
}

// Helper to recognize certain Klass fields which are invariant across
// some group of array types (e.g., int[] or all T[] where T < Object).
const Type*
LoadNode::load_array_final_field(const TypeKlassPtr* tkls,
                                 ciKlass* klass) const {
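  // Illustrative note: every int[] shares one array klass, so a field such
  // as _layout_helper read through a constant int[] klass pointer has the
  // same value for all of them; the checks below exploit exactly that.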
  if (UseCompactObjectHeaders) {
    if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) {
      // The field is Klass::_prototype_header. Return its (constant) value.
      assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header");
      return TypeX::make(klass->prototype_header());
    }
  }
  if (tkls->offset() == in_bytes(Klass::modifier_flags_offset())) {
    // The field is Klass::_modifier_flags. Return its (constant) value.
    // (Folds up the 2nd indirection in aClassConstant.getModifiers().)
    assert(this->Opcode() == Op_LoadI, "must load an int from _modifier_flags");
    return TypeInt::make(klass->modifier_flags());
  }
  if (tkls->offset() == in_bytes(Klass::access_flags_offset())) {
    // The field is Klass::_access_flags. Return its (constant) value.
    // (Folds up the 2nd indirection in Reflection.getClassAccessFlags(aClassConstant).)
    assert(this->Opcode() == Op_LoadI, "must load an int from _access_flags");
    return TypeInt::make(klass->access_flags());
  }
  if (tkls->offset() == in_bytes(Klass::layout_helper_offset())) {
    // The field is Klass::_layout_helper. Return its constant value if known.
    assert(this->Opcode() == Op_LoadI, "must load an int from _layout_helper");
    return TypeInt::make(klass->layout_helper());
  }

  // No match.
  return nullptr;
        // Note (explanatory): two loads are involved here. 'adr' is the load
        // of the OopHandle from _java_mirror, and this node is the load that
        // resolves the handle to the mirror oop; both must be LoadP.
        assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
        assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
        return TypeInstPtr::make(klass->java_mirror());
      }
    }
  }

  const TypeKlassPtr* tkls = tp->isa_klassptr();
  if (tkls != nullptr) {
    if (tkls->is_loaded() && tkls->klass_is_exact()) {
      ciKlass* klass = tkls->exact_klass();
      // We are loading a field from a Klass metaobject whose identity
      // is known at compile time (the type is "exact" or "precise").
      // Check for fields we know are maintained as constants by the VM.
      if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
        // The field is Klass::_super_check_offset. Return its (constant) value.
        // (Folds up type checking code.)
        assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
        return TypeInt::make(klass->super_check_offset());
      }
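      // Explanatory note: with compact object headers each klass carries its
      // own mark-word prototype, so for an exact klass that prototype is a
      // compile-time constant just like the fields handled above.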
      if (UseCompactObjectHeaders) {
        if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) {
          // The field is Klass::_prototype_header. Return its (constant) value.
          assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header");
          return TypeX::make(klass->prototype_header());
        }
      }
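      // Explanatory note: only the first few superclasses (typically 8, see
      // primary_super_limit) are cached in _primary_supers; deeper ancestors
      // live in the secondary supers list, so only shallow depths fold here.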
      // Compute index into the primary_supers array.
      juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
      // Check for overflow; use an unsigned compare to handle the negative case.
      if (depth < ciKlass::primary_super_limit()) {
        // The field is an element of Klass::_primary_supers. Return its (constant) value.
        // (Folds up type checking code.)
        assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
        ciKlass* ss = klass->super_of_depth(depth);
        return ss ? TypeKlassPtr::make(ss, Type::trust_interfaces) : TypePtr::NULL_PTR;
      }
      const Type* aift = load_array_final_field(tkls, klass);
      if (aift != nullptr) return aift;
    }

    // We can still check if we are loading from the primary_supers array at a
    // shallow enough depth. Even though the klass is not exact, entries less
    // than or equal to its super depth are correct.
    if (tkls->is_loaded()) {
      ciKlass* klass = nullptr;
      if (tkls->isa_instklassptr()) {
    if (value != nullptr && value->is_Con()) {
      assert(value->bottom_type()->higher_equal(_type), "sanity");
      return value->bottom_type();
    }
  }

  bool is_vect = (_type->isa_vect() != nullptr);
  if (is_instance && !is_vect) {
    // If we have an instance type and our memory input is the
    // program's initial memory state, there is no matching store,
    // so just return a zero of the appropriate type. (Vector loads
    // are excluded above: there is no vector zero constant.)
    Node* mem = in(MemNode::Memory);
    if (mem->is_Parm() && mem->in(0)->is_Start()) {
      assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
      return Type::get_zero_type(_type->basic_type());
    }
  }
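  // Explanatory note: markWord::prototype() does not encode a klass; with
  // compact object headers a fresh object's mark word does, so the constant
  // fold below must be skipped in that case.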

  Node* alloc = is_new_object_mark_load();
  if (!UseCompactObjectHeaders && alloc != nullptr) {
    return TypeX::make(markWord::prototype().value());
  }

  return _type;
}

//------------------------------match_edge-------------------------------------
// Do we Match on this edge index or not? Match only the address.
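// (Explanatory note: only the Address input may be folded into an addressing
// mode during matching; the memory and control inputs are not address
// components.)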
uint LoadNode::match_edge(uint idx) const {
  return idx == MemNode::Address;
}

//--------------------------LoadBNode::Ideal--------------------------------------
//
// If the previous store is to the same address as this load,
// and the value stored was larger than a byte, replace this load
// with the value stored truncated to a byte. If no truncation is
// needed, the replacement is done in LoadNode::Identity().
//
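// For example (illustrative): if the prior store wrote the int x, this load
// becomes (x << 24) >> 24, i.e., the low byte sign-extended back to an int.
//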
Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {