  // Steps (a), (b): Walk past independent stores to find an exact match.
  if (prev_mem != nullptr && prev_mem != in(MemNode::Memory)) {
    // (c) See if we can fold up on the spot, but don't fold up here.
    // Fold-up might require truncation (for LoadB/LoadS/LoadUS) or
    // just return a prior value, which is done by Identity calls.
    if (can_see_stored_value(prev_mem, phase)) {
      // Make ready for step (d):
      set_req_X(MemNode::Memory, prev_mem, phase);
      return this;
    }
  }

  return progress ? this : nullptr;
}
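// Illustration of the fold-up enabled by steps (a)-(d) (a sketch, not code
// from this file): given
//   StoreI(mem, adr, v)        // the exact-match store found in step (b)
//   LoadB(mem', adr)           // this load, with independent stores between
// step (d) rewires the load's memory input directly to the StoreI. After
// that, LoadBNode::Ideal (below) can replace the load with a truncation of
// v, e.g. RShiftI(LShiftI(v, 24), 24), or LoadNode::Identity() can return
// v unchanged when no truncation is needed.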

// Helper to recognize certain Klass fields which are invariant across
// some group of array types (e.g., int[] or all T[] where T < Object).
const Type*
LoadNode::load_array_final_field(const TypeKlassPtr *tkls,
                                 ciKlass* klass) const {
  if (UseCompactObjectHeaders) {
    if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) {
      // The field is Klass::_prototype_header. Return its (constant) value.
      assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header");
      return TypeX::make(klass->prototype_header());
    }
  }
  if (tkls->offset() == in_bytes(Klass::modifier_flags_offset())) {
    // The field is Klass::_modifier_flags. Return its (constant) value.
    // (Folds up the 2nd indirection in aClassConstant.getModifiers().)
    assert(this->Opcode() == Op_LoadI, "must load an int from _modifier_flags");
    return TypeInt::make(klass->modifier_flags());
  }
  if (tkls->offset() == in_bytes(Klass::access_flags_offset())) {
    // The field is Klass::_access_flags. Return its (constant) value.
    // (Folds up the 2nd indirection in Reflection.getClassAccessFlags(aClassConstant).)
    assert(this->Opcode() == Op_LoadI, "must load an int from _access_flags");
    return TypeInt::make(klass->access_flags());
  }
  if (tkls->offset() == in_bytes(Klass::layout_helper_offset())) {
    // The field is Klass::_layout_helper. Return its constant value if known.
    assert(this->Opcode() == Op_LoadI, "must load an int from _layout_helper");
    return TypeInt::make(klass->layout_helper());
  }

  // No match.
  return nullptr;
}
// ...

        assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
        assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
        return TypeInstPtr::make(klass->java_mirror());
      }
    }
  }
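  // The two asserts above check both levels of the mirror access: the mirror
  // is reached through an OopHandle, giving the shape
  //   LoadP(LoadP(AddP(foo:Klass, #java_mirror)))
  // where the inner LoadP (adr) yields the handle and the outer LoadP (this
  // node) yields the mirror oop that is constant-folded here.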

  const TypeKlassPtr *tkls = tp->isa_klassptr();
  if (tkls != nullptr) {
    if (tkls->is_loaded() && tkls->klass_is_exact()) {
      ciKlass* klass = tkls->exact_klass();
      // We are loading a field from a Klass metaobject whose identity
      // is known at compile time (the type is "exact" or "precise").
      // Check for fields we know are maintained as constants by the VM.
      if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
        // The field is Klass::_super_check_offset. Return its (constant) value.
        // (Folds up type checking code.)
        assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
        return TypeInt::make(klass->super_check_offset());
      }
      if (UseCompactObjectHeaders) {
        if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) {
          // The field is Klass::_prototype_header. Return its (constant) value.
          assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header");
          return TypeX::make(klass->prototype_header());
        }
      }
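      // With compact object headers the mark word of a new object is not one
      // static bit pattern: its upper bits carry the compressed klass
      // pointer, so the prototype is a per-klass constant kept in
      // Klass::_prototype_header and folded here (and in
      // load_array_final_field above) instead of markWord::prototype().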
      // Compute index into primary_supers array
      juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
      // Check for overflowing; use unsigned compare to handle the negative case.
      if (depth < ciKlass::primary_super_limit()) {
        // The field is an element of Klass::_primary_supers. Return its (constant) value.
        // (Folds up type checking code.)
        assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
        ciKlass* ss = klass->super_of_depth(depth);
        return ss ? TypeKlassPtr::make(ss, Type::trust_interfaces) : TypePtr::NULL_PTR;
      }
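      // Worked example for the unsigned compare above: if tkls->offset()
      // addresses a field below primary_supers_offset(), the subtraction
      // wraps around to a huge juint, so the single unsigned test
      // 'depth < primary_super_limit()' rejects both too-small and too-large
      // offsets at once.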
      const Type* aift = load_array_final_field(tkls, klass);
      if (aift != nullptr) return aift;
    }

    // We can still check if we are loading from the primary_supers array at a
    // shallow enough depth. Even though the klass is not exact, entries less
    // than or equal to its super depth are correct.
    if (tkls->is_loaded()) {
      ciKlass* klass = nullptr;
      if (tkls->isa_instklassptr()) {
// ...

    if (value != nullptr && value->is_Con()) {
      assert(value->bottom_type()->higher_equal(_type), "sanity");
      return value->bottom_type();
    }
  }

  bool is_vect = (_type->isa_vect() != nullptr);
  if (is_instance && !is_vect) {
    // If we have an instance type and our memory input is the
    // program's initial memory state, there is no matching store,
    // so just return a zero of the appropriate type;
    // except if it is vectorized, then we have no zero constant.
    Node* mem = in(MemNode::Memory);
    if (mem->is_Parm() && mem->in(0)->is_Start()) {
      assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
      return Type::get_zero_type(_type->basic_type());
    }
  }
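  // Illustrative case for the zero fold above (assumed example, not from
  // this file): reading a field of a fresh, non-escaping object before any
  // store to it, e.g.
  //   Point p = new Point(); int x = p.x;   // x is provably 0
  // Once escape analysis gives the load a memory input that chains back to
  // the Start node's memory Parm, the load folds to the type's zero.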

  Node* alloc = is_new_object_mark_load(phase);
  if (!UseCompactObjectHeaders && alloc != nullptr) {
    return TypeX::make(markWord::prototype().value());
  }
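  // The !UseCompactObjectHeaders guard: with compact headers the mark word
  // of a new object embeds the klass pointer and so differs per klass; it
  // cannot be folded to the static markWord::prototype(). The per-klass
  // constant comes from Klass::_prototype_header instead (handled above).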

  return _type;
}

//------------------------------match_edge-------------------------------------
// Do we Match on this edge index or not? Match only the address.
uint LoadNode::match_edge(uint idx) const {
  return idx == MemNode::Address;
}
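// Only the Address input participates in matching: the matcher folds the
// address subtree into the load's addressing mode, while the Control and
// Memory inputs merely order the load and select no instructions.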

//--------------------------LoadBNode::Ideal--------------------------------------
//
// If the previous store is to the same address as this load,
// and the value stored was larger than a byte, replace this load
// with the value stored truncated to a byte. If no truncation is
// needed, the replacement is done in LoadNode::Identity().
//
Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {