1857 // Steps (a), (b): Walk past independent stores to find an exact match.
1858 if (prev_mem != nullptr && prev_mem != in(MemNode::Memory)) {
1859 // (c) See if we can fold up on the spot, but don't fold up here.
1860 // Fold-up might require truncation (for LoadB/LoadS/LoadUS), or it might
1861 // simply return a prior value, which is done by the Identity calls.
1862 if (can_see_stored_value(prev_mem, phase)) {
1863 // Make ready for step (d):
1864 set_req_X(MemNode::Memory, prev_mem, phase);
1865 return this;
1866 }
1867 }
1868
1869 return progress ? this : nullptr;
1870 }
1871
1872 // Helper to recognize certain Klass fields which are invariant across
1873 // some group of array types (e.g., int[] or all T[] where T < Object).
1874 const Type*
1875 LoadNode::load_array_final_field(const TypeKlassPtr *tkls,
1876 ciKlass* klass) const {
1877 if (tkls->offset() == in_bytes(Klass::modifier_flags_offset())) {
1878 // The field is Klass::_modifier_flags. Return its (constant) value.
1879 // (Folds up the 2nd indirection in aClassConstant.getModifiers().)
1880 assert(this->Opcode() == Op_LoadI, "must load an int from _modifier_flags");
1881 return TypeInt::make(klass->modifier_flags());
1882 }
1883 if (tkls->offset() == in_bytes(Klass::access_flags_offset())) {
1884 // The field is Klass::_access_flags. Return its (constant) value.
1885 // (Folds up the 2nd indirection in Reflection.getClassAccessFlags(aClassConstant).)
1886 assert(this->Opcode() == Op_LoadI, "must load an int from _access_flags");
1887 return TypeInt::make(klass->access_flags());
1888 }
1889 if (tkls->offset() == in_bytes(Klass::layout_helper_offset())) {
1890 // The field is Klass::_layout_helper. Return its constant value if known.
1891 assert(this->Opcode() == Op_LoadI, "must load an int from _layout_helper");
1892 return TypeInt::make(klass->layout_helper());
1893 }
1894
1895 // No match.
1896 return nullptr;
2029 assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2030 assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2031 return TypeInstPtr::make(klass->java_mirror());
2032 }
2033 }
2034 }
2035
2036 const TypeKlassPtr *tkls = tp->isa_klassptr();
2037 if (tkls != nullptr) {
2038 if (tkls->is_loaded() && tkls->klass_is_exact()) {
2039 ciKlass* klass = tkls->exact_klass();
2040 // We are loading a field from a Klass metaobject whose identity
2041 // is known at compile time (the type is "exact" or "precise").
2042 // Check for fields we know are maintained as constants by the VM.
2043 if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
2044 // The field is Klass::_super_check_offset. Return its (constant) value.
2045 // (Folds up type checking code.)
2046 assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
2047 return TypeInt::make(klass->super_check_offset());
2048 }
2049 // Compute index into primary_supers array
2050 juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
2051 // Check for an out-of-range index; the unsigned compare also rejects offsets below primary_supers_offset (they wrap to large values).
2052 if (depth < ciKlass::primary_super_limit()) {
2053 // The field is an element of Klass::_primary_supers. Return its (constant) value.
2054 // (Folds up type checking code.)
2055 assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
2056 ciKlass* ss = klass->super_of_depth(depth);
2057 return ss ? TypeKlassPtr::make(ss, Type::trust_interfaces) : TypePtr::NULL_PTR;
2058 }
2059 const Type* aift = load_array_final_field(tkls, klass);
2060 if (aift != nullptr) return aift;
2061 }
2062
2063 // We can still check if we are loading from the primary_supers array at a
2064 // shallow enough depth. Even though the klass is not exact, entries less
2065 // than or equal to its super depth are correct.
2066 if (tkls->is_loaded()) {
2067 ciKlass* klass = nullptr;
2068 if (tkls->isa_instklassptr()) {
2119 if (value != nullptr && value->is_Con()) {
2120 assert(value->bottom_type()->higher_equal(_type), "sanity");
2121 return value->bottom_type();
2122 }
2123 }
2124
2125 bool is_vect = (_type->isa_vect() != nullptr);
2126 if (is_instance && !is_vect) {
2127 // If we have an instance type and our memory input is the
2128 // program's initial memory state, there is no matching store,
2129 // so just return a zero of the appropriate type -
2130 // except if it is vectorized - then we have no zero constant.
2131 Node *mem = in(MemNode::Memory);
2132 if (mem->is_Parm() && mem->in(0)->is_Start()) {
2133 assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
2134 return Type::get_zero_type(_type->basic_type());
2135 }
2136 }
2137
2138 Node* alloc = is_new_object_mark_load(phase);
2139 if (alloc != nullptr) {
2140 return TypeX::make(markWord::prototype().value());
2141 }
2142
2143 return _type;
2144 }
2145
2146 //------------------------------match_edge-------------------------------------
2147 // Do we Match on this edge index or not? Match only the address.
2148 uint LoadNode::match_edge(uint idx) const {
2149 return idx == MemNode::Address;
2150 }
2151
2152 //--------------------------LoadBNode::Ideal--------------------------------------
2153 //
2154 // If the previous store is to the same address as this load,
2155 // and the value stored was larger than a byte, replace this load
2156 // with the value stored truncated to a byte. If no truncation is
2157 // needed, the replacement is done in LoadNode::Identity().
2158 //
2159 Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
|
1857 // Steps (a), (b): Walk past independent stores to find an exact match.
1858 if (prev_mem != nullptr && prev_mem != in(MemNode::Memory)) {
1859 // (c) See if we can fold up on the spot, but don't fold up here.
1860 // Fold-up might require truncation (for LoadB/LoadS/LoadUS), or it might
1861 // simply return a prior value, which is done by the Identity calls.
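// Rewiring the memory edge to the dominating store here lets a later
// Identity/Ideal pass on this load perform the actual replacement.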
1862 if (can_see_stored_value(prev_mem, phase)) {
1863 // Make ready for step (d):
1864 set_req_X(MemNode::Memory, prev_mem, phase);
1865 return this;
1866 }
1867 }
1868
1869 return progress ? this : nullptr;
1870 }
1871
1872 // Helper to recognize certain Klass fields which are invariant across
1873 // some group of array types (e.g., int[] or all T[] where T < Object).
1874 const Type*
1875 LoadNode::load_array_final_field(const TypeKlassPtr *tkls,
1876 ciKlass* klass) const {
1877 if (UseCompactObjectHeaders) {
1878 if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) {
1879 // The field is Klass::_prototype_header. Return its (constant) value.
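// (With compact object headers the compressed klass pointer is stored in the
// mark word, so each klass has its own constant prototype header; Op_LoadX is
// the word-sized load, LoadL on 64-bit and LoadI on 32-bit platforms.)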
1880 assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header");
1881 return TypeX::make(klass->prototype_header());
1882 }
1883 }
1884 if (tkls->offset() == in_bytes(Klass::modifier_flags_offset())) {
1885 // The field is Klass::_modifier_flags. Return its (constant) value.
1886 // (Folds up the 2nd indirection in aClassConstant.getModifiers().)
1887 assert(this->Opcode() == Op_LoadI, "must load an int from _modifier_flags");
1888 return TypeInt::make(klass->modifier_flags());
1889 }
1890 if (tkls->offset() == in_bytes(Klass::access_flags_offset())) {
1891 // The field is Klass::_access_flags. Return its (constant) value.
1892 // (Folds up the 2nd indirection in Reflection.getClassAccessFlags(aClassConstant).)
1893 assert(this->Opcode() == Op_LoadI, "must load an int from _access_flags");
1894 return TypeInt::make(klass->access_flags());
1895 }
1896 if (tkls->offset() == in_bytes(Klass::layout_helper_offset())) {
1897 // The field is Klass::_layout_helper. Return its constant value if known.
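// (_layout_helper packs the instance size for instance klasses, and the array
// tag, header size, element type and log2 element size for array klasses, so
// a constant here lets header-size and element-size computations fold.)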
1898 assert(this->Opcode() == Op_LoadI, "must load an int from _layout_helper");
1899 return TypeInt::make(klass->layout_helper());
1900 }
1901
1902 // No match.
1903 return nullptr;
2036 assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2037 assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2038 return TypeInstPtr::make(klass->java_mirror());
2039 }
2040 }
2041 }
2042
2043 const TypeKlassPtr *tkls = tp->isa_klassptr();
2044 if (tkls != nullptr) {
2045 if (tkls->is_loaded() && tkls->klass_is_exact()) {
2046 ciKlass* klass = tkls->exact_klass();
2047 // We are loading a field from a Klass metaobject whose identity
2048 // is known at compile time (the type is "exact" or "precise").
2049 // Check for fields we know are maintained as constants by the VM.
2050 if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
2051 // The field is Klass::_super_check_offset. Return its (constant) value.
2052 // (Folds up type checking code.)
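// (_super_check_offset is the offset the fast subtype check probes in the
// receiver klass, so a constant value allows the single-load fast path.)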
2053 assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
2054 return TypeInt::make(klass->super_check_offset());
2055 }
2056 if (UseCompactObjectHeaders) {
2057 if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) {
2058 // The field is Klass::_prototype_header. Return its (constant) value.
2059 assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header");
2060 return TypeX::make(klass->prototype_header());
2061 }
2062 }
2063 // Compute index into primary_supers array
2064 juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
2065 // Check for an out-of-range index; the unsigned compare also rejects offsets below primary_supers_offset (they wrap to large values).
2066 if (depth < ciKlass::primary_super_limit()) {
2067 // The field is an element of Klass::_primary_supers. Return its (constant) value.
2068 // (Folds up type checking code.)
2069 assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
2070 ciKlass* ss = klass->super_of_depth(depth);
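// super_of_depth() returns nullptr when depth exceeds this klass's super
// depth; the corresponding _primary_supers slot is nullptr in that case.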
2071 return ss ? TypeKlassPtr::make(ss, Type::trust_interfaces) : TypePtr::NULL_PTR;
2072 }
2073 const Type* aift = load_array_final_field(tkls, klass);
2074 if (aift != nullptr) return aift;
2075 }
2076
2077 // We can still check if we are loading from the primary_supers array at a
2078 // shallow enough depth. Even though the klass is not exact, entries less
2079 // than or equal to its super depth are correct.
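// (For example, every subtype of a klass at super depth 2 shares that klass's
// _primary_supers entries at depths 0, 1 and 2.)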
2080 if (tkls->is_loaded()) {
2081 ciKlass* klass = nullptr;
2082 if (tkls->isa_instklassptr()) {
2133 if (value != nullptr && value->is_Con()) {
2134 assert(value->bottom_type()->higher_equal(_type), "sanity");
2135 return value->bottom_type();
2136 }
2137 }
2138
2139 bool is_vect = (_type->isa_vect() != nullptr);
2140 if (is_instance && !is_vect) {
2141 // If we have an instance type and our memory input is the
2142 // program's initial memory state, there is no matching store,
2143 // so just return a zero of the appropriate type -
2144 // except if it is vectorized - then we have no zero constant.
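// (A memory input that is still the Start node's memory Parm means no store
// has touched this instance's memory slice, so the field still holds its
// default value.)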
2145 Node *mem = in(MemNode::Memory);
2146 if (mem->is_Parm() && mem->in(0)->is_Start()) {
2147 assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
2148 return Type::get_zero_type(_type->basic_type());
2149 }
2150 }
2151
2152 Node* alloc = is_new_object_mark_load(phase);
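// With compact object headers the mark word of a new object also carries the
// compressed klass pointer, so it is not the klass-independent prototype;
// that case is folded through Klass::_prototype_header above instead.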
2153 if (!UseCompactObjectHeaders && alloc != nullptr) {
2154 return TypeX::make(markWord::prototype().value());
2155 }
2156
2157 return _type;
2158 }
2159
2160 //------------------------------match_edge-------------------------------------
2161 // Do we Match on this edge index or not? Match only the address.
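// (Only the address expression may be absorbed into the load's addressing
// mode during matching; control and memory inputs are never subsumed.)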
2162 uint LoadNode::match_edge(uint idx) const {
2163 return idx == MemNode::Address;
2164 }
2165
2166 //--------------------------LoadBNode::Ideal--------------------------------------
2167 //
2168 // If the previous store is to the same address as this load,
2169 // and the value stored was larger than a byte, replace this load
2170 // with the value stored truncated to a byte. If no truncation is
2171 // needed, the replacement is done in LoadNode::Identity().
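// For example, a LoadB that can see a StoreB of an int value v is replaced
// by (v << 24) >> 24, i.e. a sign-extending truncation to a byte.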
2172 //
2173 Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
|