      set_req_X(MemNode::Memory, prev_mem, phase);
      return this;
    }
  }

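  // A load of the mark word from a just-allocated object is not folded to a
  // constant here when biased locking is enabled: the initial mark then
  // depends on the allocated klass (its prototype header may carry a bias
  // pattern), so the AllocateNode is asked to construct the proper mark
  // value via make_ideal_mark() instead.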
  AllocateNode* alloc = is_new_object_mark_load(phase);
  if (alloc != NULL && alloc->Opcode() == Op_Allocate && UseBiasedLocking) {
    InitializeNode* init = alloc->initialization();
    Node* control = init->proj_out(0);
    return alloc->make_ideal_mark(phase, address, control, mem);
  }

  return progress ? this : NULL;
}

// Helper to recognize certain Klass fields which are invariant across
// some group of array types (e.g., int[] or all T[] where T < Object).
const Type*
LoadNode::load_array_final_field(const TypeKlassPtr *tkls,
                                 ciKlass* klass) const {
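  // With compact object headers, Klass::_prototype_header is a per-klass
  // constant (under that design the mark word also encodes klass
  // information), so a LoadX of it from an exact klass folds to a constant.
  // The exact-klass path in LoadNode::Value() below performs the same fold.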
  if (UseCompactObjectHeaders) {
    if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) {
      // The field is Klass::_prototype_header. Return its (constant) value.
      assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header");
      return TypeX::make(klass->prototype_header());
    }
  }
  if (tkls->offset() == in_bytes(Klass::modifier_flags_offset())) {
    // The field is Klass::_modifier_flags. Return its (constant) value.
    // (Folds up the 2nd indirection in aClassConstant.getModifiers().)
    assert(this->Opcode() == Op_LoadI, "must load an int from _modifier_flags");
    return TypeInt::make(klass->modifier_flags());
  }
  if (tkls->offset() == in_bytes(Klass::access_flags_offset())) {
    // The field is Klass::_access_flags. Return its (constant) value.
    // (Folds up the 2nd indirection in Reflection.getClassAccessFlags(aClassConstant).)
    assert(this->Opcode() == Op_LoadI, "must load an int from _access_flags");
    return TypeInt::make(klass->access_flags());
  }
  if (tkls->offset() == in_bytes(Klass::layout_helper_offset())) {
    // The field is Klass::_layout_helper. Return its constant value if known.
    assert(this->Opcode() == Op_LoadI, "must load an int from _layout_helper");
    return TypeInt::make(klass->layout_helper());
  }

  // No match.
  return NULL;

// ... (lines 1877-2006 elided) ...
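// (The excerpt resumes inside LoadNode::Value(). Judging from the asserts
//  below, the elided context recognizes a load of Klass::_java_mirror from a
//  constant klass: the mirror is reached through an OopHandle, so both this
//  node and its address input are LoadP nodes, and the result folds to the
//  constant mirror oop.)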
        assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
        assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
        return TypeInstPtr::make(klass->java_mirror());
      }
    }
  }

  const TypeKlassPtr *tkls = tp->isa_klassptr();
  if (tkls != NULL && !StressReflectiveCode) {
    ciKlass* klass = tkls->klass();
    if (klass->is_loaded() && tkls->klass_is_exact()) {
      // We are loading a field from a Klass metaobject whose identity
      // is known at compile time (the type is "exact" or "precise").
      // Check for fields we know are maintained as constants by the VM.
      if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
        // The field is Klass::_super_check_offset. Return its (constant) value.
        // (Folds up type checking code.)
        assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
        return TypeInt::make(klass->super_check_offset());
      }
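      // Compact object headers: Klass::_prototype_header is likewise a
      // per-klass constant on this exact-klass path; this mirrors the case
      // added to load_array_final_field() above.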
      if (UseCompactObjectHeaders) {
        if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) {
          // The field is Klass::_prototype_header. Return its (constant) value.
          assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header");
          return TypeX::make(klass->prototype_header());
        }
      }
      // Compute index into primary_supers array
      juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
      // Check for overflow; use unsigned compare to handle the negative case.
      if( depth < ciKlass::primary_super_limit() ) {
        // The field is an element of Klass::_primary_supers. Return its (constant) value.
        // (Folds up type checking code.)
        assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
        ciKlass *ss = klass->super_of_depth(depth);
        return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
      }
      const Type* aift = load_array_final_field(tkls, klass);
      if (aift != NULL) return aift;
    }

    // We can still check if we are loading from the primary_supers array at a
    // shallow enough depth. Even though the klass is not exact, entries less
    // than or equal to its super depth are correct.
    if (klass->is_loaded() ) {
      ciType *inner = klass;
      while( inner->is_obj_array_klass() )

// ... (lines 2054-2097 elided) ...
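// (The excerpt resumes further down in LoadNode::Value(). 'value' below is
//  the value of a store visible to this load -- presumably obtained via
//  can_see_stored_value(mem, phase) in the elided lines; a visible constant
//  store lets the load take on that constant's type.)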
    if (value != NULL && value->is_Con()) {
      assert(value->bottom_type()->higher_equal(_type),"sanity");
      return value->bottom_type();
    }
  }

  bool is_vect = (_type->isa_vect() != NULL);
  if (is_instance && !is_vect) {
    // If we have an instance type and our memory input is the
    // program's initial memory state, there is no matching store,
    // so just return a zero of the appropriate type -
    // except if it is vectorized - then we have no zero constant.
    Node *mem = in(MemNode::Memory);
    if (mem->is_Parm() && mem->in(0)->is_Start()) {
      assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
      return Type::get_zero_type(_type->basic_type());
    }
  }

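  // The mark word of a new object folds to the constant neutral prototype
  // only when neither biased locking nor compact object headers are in use:
  // with biased locking the initial mark may be the klass' biasable
  // prototype, and with compact headers the mark encodes the klass itself,
  // so in neither case is it a single global constant.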
  Node* alloc = is_new_object_mark_load(phase);
  if (alloc != NULL && !(alloc->Opcode() == Op_Allocate && UseBiasedLocking) && !UseCompactObjectHeaders) {
    return TypeX::make(markWord::prototype().value());
  }

  return _type;
}

//------------------------------match_edge-------------------------------------
// Do we Match on this edge index or not? Match only the address.
uint LoadNode::match_edge(uint idx) const {
  return idx == MemNode::Address;
}

//--------------------------LoadBNode::Ideal--------------------------------------
//
// If the previous store is to the same address as this load,
// and the value stored was larger than a byte, replace this load
// with the value stored truncated to a byte. If no truncation is
// needed, the replacement is done in LoadNode::Identity().
//
Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {