89 // fake the missing field
90 const TypePtr* _adr_type = nullptr;
91 if (in(Address) != nullptr)
92 _adr_type = in(Address)->bottom_type()->isa_ptr();
93 #endif
94 dump_adr_type(_adr_type, st);
95
96 Compile* C = Compile::current();
97 if (C->alias_type(_adr_type)->is_volatile()) {
98 st->print(" Volatile!");
99 }
100 if (_unaligned_access) {
101 st->print(" unaligned");
102 }
103 if (_mismatched_access) {
104 st->print(" mismatched");
105 }
106 if (_unsafe_access) {
107 st->print(" unsafe");
108 }
109 }
110
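// Print the address type followed by its alias index (Bot, Top, Raw, or the concrete index).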
111 void MemNode::dump_adr_type(const TypePtr* adr_type, outputStream* st) {
112 st->print(" @");
113 if (adr_type == nullptr) {
114 st->print("null");
115 } else {
116 adr_type->dump_on(st);
117 Compile* C = Compile::current();
118 Compile::AliasType* atp = nullptr;
119 if (C->have_alias_type(adr_type)) atp = C->alias_type(adr_type);
120 if (atp == nullptr)
121 st->print(", idx=?\?;");
122 else if (atp->index() == Compile::AliasIdxBot)
123 st->print(", idx=Bot;");
124 else if (atp->index() == Compile::AliasIdxTop)
125 st->print(", idx=Top;");
126 else if (atp->index() == Compile::AliasIdxRaw)
127 st->print(", idx=Raw;");
128 else {
633 return ac;
634 }
635 }
636 }
637 }
638 }
639 return nullptr;
640 }
641
642 ArrayCopyNode* MemNode::find_array_copy_clone(Node* ld_alloc, Node* mem) const {
643 if (mem->is_Proj() && mem->in(0) != nullptr && (mem->in(0)->Opcode() == Op_MemBarStoreStore ||
644 mem->in(0)->Opcode() == Op_MemBarCPUOrder)) {
645 if (ld_alloc != nullptr) {
646 // Check if there is an array copy for a clone
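      // Expected shape: ArrayCopy (clone) -> Proj (control) -> MemBarStoreStore/MemBarCPUOrder -> Proj (memory) == mem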
647 Node* mb = mem->in(0);
648 ArrayCopyNode* ac = nullptr;
649 if (mb->in(0) != nullptr && mb->in(0)->is_Proj() &&
650 mb->in(0)->in(0) != nullptr && mb->in(0)->in(0)->is_ArrayCopy()) {
651 ac = mb->in(0)->in(0)->as_ArrayCopy();
652 } else {
653 // Step over GC barrier when ReduceInitialCardMarks is disabled
654 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
655 Node* control_proj_ac = bs->step_over_gc_barrier(mb->in(0));
656
657 if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
658 ac = control_proj_ac->in(0)->as_ArrayCopy();
659 }
660 }
661
662 if (ac != nullptr && ac->is_clonebasic()) {
663 AllocateNode* alloc = AllocateNode::Ideal_allocation(ac->in(ArrayCopyNode::Dest));
664 if (alloc != nullptr && alloc == ld_alloc) {
665 return ac;
666 }
667 }
668 }
669 }
670 return nullptr;
671 }
672
673 // The logic for reordering loads and stores uses four steps:
674 // (a) Walk carefully past stores and initializations which we
675 // can prove are independent of this load.
676 // (b) Observe that the next memory state makes an exact match
855 Compile* C = Compile::current();
856 assert(C->get_alias_index(cross_check) == C->get_alias_index(tp),
857 "must stay in the original alias category");
858 // The type of the address must be contained in the adr_type,
859 // disregarding "null"-ness.
860 // (We make an exception for TypeRawPtr::BOTTOM, which is a bit bucket.)
861 const TypePtr* tp_notnull = tp->join(TypePtr::NOTNULL)->is_ptr();
862 assert(cross_check->meet(tp_notnull) == cross_check->remove_speculative(),
863 "real address must not escape from expected memory type");
864 }
865 #endif
866 return tp;
867 }
868 }
869
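// Return the GC barrier data attached to a memory access or load-store node; 0 if the node carries none.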
870 uint8_t MemNode::barrier_data(const Node* n) {
871 if (n->is_LoadStore()) {
872 return n->as_LoadStore()->barrier_data();
873 } else if (n->is_Mem()) {
874 return n->as_Mem()->barrier_data();
875 }
876 return 0;
877 }
878
879 //=============================================================================
880 // Should LoadNode::Ideal() attempt to remove control edges?
881 bool LoadNode::can_remove_control() const {
882 return !has_pinned_control_dependency();
883 }
884 uint LoadNode::size_of() const { return sizeof(*this); }
885 bool LoadNode::cmp(const Node &n) const {
886 LoadNode& load = (LoadNode &)n;
887 return Type::equals(_type, load._type) &&
888 _control_dependency == load._control_dependency &&
889 _mo == load._mo;
890 }
891 const Type *LoadNode::bottom_type() const { return _type; }
892 uint LoadNode::ideal_reg() const {
893 return _type->ideal_reg();
894 }
896 #ifndef PRODUCT
897 void LoadNode::dump_spec(outputStream *st) const {
898 MemNode::dump_spec(st);
899 if( !Verbose && !WizardMode ) {
900 // standard dump does this in Verbose and WizardMode
901 st->print(" #"); _type->dump_on(st);
902 }
903 if (!depends_only_on_test()) {
904 st->print(" (does not depend only on test, ");
905 if (control_dependency() == UnknownControl) {
906 st->print("unknown control");
907 } else if (control_dependency() == Pinned) {
908 st->print("pinned");
909 } else if (adr_type() == TypeRawPtr::BOTTOM) {
910 st->print("raw access");
911 } else {
912 st->print("unknown reason");
913 }
914 st->print(")");
915 }
916 }
917 #endif
918
919 #ifdef ASSERT
920 //----------------------------is_immutable_value-------------------------------
921 // Helper function to allow a raw load without control edge for some cases
922 bool LoadNode::is_immutable_value(Node* adr) {
923 if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top() &&
924 adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal) {
925
926 jlong offset = adr->in(AddPNode::Offset)->find_intptr_t_con(-1);
927 int offsets[] = {
928 in_bytes(JavaThread::osthread_offset()),
929 in_bytes(JavaThread::threadObj_offset()),
930 in_bytes(JavaThread::vthread_offset()),
931 in_bytes(JavaThread::scopedValueCache_offset()),
932 };
933
934 for (size_t i = 0; i < sizeof offsets / sizeof offsets[0]; i++) {
935 if (offset == offsets[i]) {
1041 intptr_t ld_off = 0;
1042 AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
1043 Node* ac = find_previous_arraycopy(phase, ld_alloc, st, true);
1044 if (ac != nullptr) {
1045 assert(ac->is_ArrayCopy(), "what kind of node can this be?");
1046
1047 Node* mem = ac->in(TypeFunc::Memory);
1048 Node* ctl = ac->in(0);
1049 Node* src = ac->in(ArrayCopyNode::Src);
1050
1051 if (!ac->as_ArrayCopy()->is_clonebasic() && !phase->type(src)->isa_aryptr()) {
1052 return nullptr;
1053 }
1054
1055 // load depends on the tests that validate the arraycopy
1056 LoadNode* ld = clone_pinned();
1057 Node* addp = in(MemNode::Address)->clone();
1058 if (ac->as_ArrayCopy()->is_clonebasic()) {
1059 assert(ld_alloc != nullptr, "need an alloc");
1060 assert(addp->is_AddP(), "address must be addp");
1061 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1062 assert(bs->step_over_gc_barrier(addp->in(AddPNode::Base)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1063 assert(bs->step_over_gc_barrier(addp->in(AddPNode::Address)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
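        // A basic clone copies the whole object, so the load can read the same offset from the source object instead of the destination.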
1064 addp->set_req(AddPNode::Base, src);
1065 addp->set_req(AddPNode::Address, src);
1066 } else {
1067 assert(ac->as_ArrayCopy()->is_arraycopy_validated() ||
1068 ac->as_ArrayCopy()->is_copyof_validated() ||
1069 ac->as_ArrayCopy()->is_copyofrange_validated(), "only supported cases");
1070 assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
1071 addp->set_req(AddPNode::Base, src);
1072 addp->set_req(AddPNode::Address, src);
1073
1074 const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
1075        BasicType ary_elem = ary_t->elem()->array_element_basic_type();
1076 if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;
1077
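        // The matching source element sits at an index shifted by (SrcPos - DestPos); compute that delta and scale it by the element size.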
1078 uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
1079 uint shift = exact_log2(type2aelembytes(ary_elem));
1080
1081 Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
1082 #ifdef _LP64
1083 diff = phase->transform(new ConvI2LNode(diff));
1227 }
1228
1229 // A load from an initialization barrier can match a captured store.
1230 if (st->is_Proj() && st->in(0)->is_Initialize()) {
1231 InitializeNode* init = st->in(0)->as_Initialize();
1232 AllocateNode* alloc = init->allocation();
1233 if ((alloc != nullptr) && (alloc == ld_alloc)) {
1234 // examine a captured store value
1235 st = init->find_captured_store(ld_off, memory_size(), phase);
1236 if (st != nullptr) {
1237 continue; // take one more trip around
1238 }
1239 }
1240 }
1241
1242   // A load of the boxed value from the result of a valueOf() call is that call's input parameter.
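   // e.g. a load of Integer.value from the result of Integer.valueOf(x) is replaced by x itself.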
1243 if (this->is_Load() && ld_adr->is_AddP() &&
1244 (tp != nullptr) && tp->is_ptr_to_boxed_value()) {
1245 intptr_t ignore = 0;
1246 Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore);
1247 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1248 base = bs->step_over_gc_barrier(base);
1249 if (base != nullptr && base->is_Proj() &&
1250 base->as_Proj()->_con == TypeFunc::Parms &&
1251 base->in(0)->is_CallStaticJava() &&
1252 base->in(0)->as_CallStaticJava()->is_boxing_method()) {
1253 return base->in(0)->in(TypeFunc::Parms);
1254 }
1255 }
1256
1257 break;
1258 }
1259
1260 return nullptr;
1261 }
1262
1263 //----------------------is_instance_field_load_with_local_phi------------------
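// True when this load reads through a memory Phi merged at 'ctrl' and its address is a known instance field or boxed value.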
1264 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
1265 if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
1266 in(Address)->is_AddP() ) {
1267 const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
1268 // Only instances and boxed values.
2507 //------------------------------Identity---------------------------------------
2508 // To clean up reflective code, simplify k.java_mirror.as_klass to plain k.
2509 // Also feed through the klass in Allocate(...klass...)._klass.
2510 Node* LoadKlassNode::Identity(PhaseGVN* phase) {
2511 return klass_identity_common(phase);
2512 }
2513
2514 Node* LoadNode::klass_identity_common(PhaseGVN* phase) {
2515 Node* x = LoadNode::Identity(phase);
2516 if (x != this) return x;
2517
2518 // Take apart the address into an oop and offset.
2519 // Return 'this' if we cannot.
2520 Node* adr = in(MemNode::Address);
2521 intptr_t offset = 0;
2522 Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
2523 if (base == nullptr) return this;
2524 const TypeOopPtr* toop = phase->type(adr)->isa_oopptr();
2525 if (toop == nullptr) return this;
2526
2527 // Step over potential GC barrier for OopHandle resolve
2528 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2529 if (bs->is_gc_barrier_node(base)) {
2530 base = bs->step_over_gc_barrier(base);
2531 }
2532
2533 // We can fetch the klass directly through an AllocateNode.
2534 // This works even if the klass is not constant (clone or newArray).
2535 if (offset == oopDesc::klass_offset_in_bytes()) {
2536 Node* allocated_klass = AllocateNode::Ideal_klass(base, phase);
2537 if (allocated_klass != nullptr) {
2538 return allocated_klass;
2539 }
2540 }
2541
2542 // Simplify k.java_mirror.as_klass to plain k, where k is a Klass*.
2543 // See inline_native_Class_query for occurrences of these patterns.
2544 // Java Example: x.getClass().isAssignableFrom(y)
2545 //
2546 // This improves reflective code, often making the Class
2547 // mirror go completely dead. (Current exception: Class
2548 // mirrors may appear in debug info, but we could clean them out by
2549 // introducing a new debug info operator for Klass.java_mirror).
2550
2551 if (toop->isa_instptr() && toop->is_instptr()->instance_klass() == phase->C->env()->Class_klass()
2552 && offset == java_lang_Class::klass_offset()) {
|
89 // fake the missing field
90 const TypePtr* _adr_type = nullptr;
91 if (in(Address) != nullptr)
92 _adr_type = in(Address)->bottom_type()->isa_ptr();
93 #endif
94 dump_adr_type(_adr_type, st);
95
96 Compile* C = Compile::current();
97 if (C->alias_type(_adr_type)->is_volatile()) {
98 st->print(" Volatile!");
99 }
100 if (_unaligned_access) {
101 st->print(" unaligned");
102 }
103 if (_mismatched_access) {
104 st->print(" mismatched");
105 }
106 if (_unsafe_access) {
107 st->print(" unsafe");
108 }
109 st->print(" barrier: %u", _barrier_data);
110 }
111
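// Print the address type followed by its alias index (Bot, Top, Raw, or the concrete index).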
112 void MemNode::dump_adr_type(const TypePtr* adr_type, outputStream* st) {
113 st->print(" @");
114 if (adr_type == nullptr) {
115 st->print("null");
116 } else {
117 adr_type->dump_on(st);
118 Compile* C = Compile::current();
119 Compile::AliasType* atp = nullptr;
120 if (C->have_alias_type(adr_type)) atp = C->alias_type(adr_type);
121 if (atp == nullptr)
122 st->print(", idx=?\?;");
123 else if (atp->index() == Compile::AliasIdxBot)
124 st->print(", idx=Bot;");
125 else if (atp->index() == Compile::AliasIdxTop)
126 st->print(", idx=Top;");
127 else if (atp->index() == Compile::AliasIdxRaw)
128 st->print(", idx=Raw;");
129 else {
634 return ac;
635 }
636 }
637 }
638 }
639 }
640 return nullptr;
641 }
642
643 ArrayCopyNode* MemNode::find_array_copy_clone(Node* ld_alloc, Node* mem) const {
644 if (mem->is_Proj() && mem->in(0) != nullptr && (mem->in(0)->Opcode() == Op_MemBarStoreStore ||
645 mem->in(0)->Opcode() == Op_MemBarCPUOrder)) {
646 if (ld_alloc != nullptr) {
647 // Check if there is an array copy for a clone
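      // Expected shape: ArrayCopy (clone) -> Proj (control) -> MemBarStoreStore/MemBarCPUOrder -> Proj (memory) == mem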
648 Node* mb = mem->in(0);
649 ArrayCopyNode* ac = nullptr;
650 if (mb->in(0) != nullptr && mb->in(0)->is_Proj() &&
651 mb->in(0)->in(0) != nullptr && mb->in(0)->in(0)->is_ArrayCopy()) {
652 ac = mb->in(0)->in(0)->as_ArrayCopy();
653 } else {
654 Node* control_proj_ac = mb->in(0);
655 if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
656 ac = control_proj_ac->in(0)->as_ArrayCopy();
657 }
658 }
659
660 if (ac != nullptr && ac->is_clonebasic()) {
661 AllocateNode* alloc = AllocateNode::Ideal_allocation(ac->in(ArrayCopyNode::Dest));
662 if (alloc != nullptr && alloc == ld_alloc) {
663 return ac;
664 }
665 }
666 }
667 }
668 return nullptr;
669 }
670
671 // The logic for reordering loads and stores uses four steps:
672 // (a) Walk carefully past stores and initializations which we
673 // can prove are independent of this load.
674 // (b) Observe that the next memory state makes an exact match
853 Compile* C = Compile::current();
854 assert(C->get_alias_index(cross_check) == C->get_alias_index(tp),
855 "must stay in the original alias category");
856 // The type of the address must be contained in the adr_type,
857 // disregarding "null"-ness.
858 // (We make an exception for TypeRawPtr::BOTTOM, which is a bit bucket.)
859 const TypePtr* tp_notnull = tp->join(TypePtr::NOTNULL)->is_ptr();
860 assert(cross_check->meet(tp_notnull) == cross_check->remove_speculative(),
861 "real address must not escape from expected memory type");
862 }
863 #endif
864 return tp;
865 }
866 }
867
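// Return the GC barrier data attached to a memory access or load-store node, looking through a DecodeN; 0 if the node carries none.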
868 uint8_t MemNode::barrier_data(const Node* n) {
869 if (n->is_LoadStore()) {
870 return n->as_LoadStore()->barrier_data();
871 } else if (n->is_Mem()) {
872 return n->as_Mem()->barrier_data();
873 } else if (n->is_DecodeN()) {
874 return MemNode::barrier_data(n->in(1));
875 }
876 return 0;
877 }
878
879 //=============================================================================
880 // Should LoadNode::Ideal() attempt to remove control edges?
881 bool LoadNode::can_remove_control() const {
882 return !has_pinned_control_dependency();
883 }
884 uint LoadNode::size_of() const { return sizeof(*this); }
885 bool LoadNode::cmp(const Node &n) const {
886 LoadNode& load = (LoadNode &)n;
887 return Type::equals(_type, load._type) &&
888 _control_dependency == load._control_dependency &&
889 _mo == load._mo;
890 }
891 const Type *LoadNode::bottom_type() const { return _type; }
892 uint LoadNode::ideal_reg() const {
893 return _type->ideal_reg();
894 }
896 #ifndef PRODUCT
897 void LoadNode::dump_spec(outputStream *st) const {
898 MemNode::dump_spec(st);
899 if( !Verbose && !WizardMode ) {
900 // standard dump does this in Verbose and WizardMode
901 st->print(" #"); _type->dump_on(st);
902 }
903 if (!depends_only_on_test()) {
904 st->print(" (does not depend only on test, ");
905 if (control_dependency() == UnknownControl) {
906 st->print("unknown control");
907 } else if (control_dependency() == Pinned) {
908 st->print("pinned");
909 } else if (adr_type() == TypeRawPtr::BOTTOM) {
910 st->print("raw access");
911 } else {
912 st->print("unknown reason");
913 }
914 st->print(")");
915 }
916 if (is_acquire()) {
917     st->print(" is_acquire");
918 }
919 }
920 #endif
921
922 #ifdef ASSERT
923 //----------------------------is_immutable_value-------------------------------
924 // Helper function to allow a raw load without control edge for some cases
925 bool LoadNode::is_immutable_value(Node* adr) {
926 if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top() &&
927 adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal) {
928
929 jlong offset = adr->in(AddPNode::Offset)->find_intptr_t_con(-1);
930 int offsets[] = {
931 in_bytes(JavaThread::osthread_offset()),
932 in_bytes(JavaThread::threadObj_offset()),
933 in_bytes(JavaThread::vthread_offset()),
934 in_bytes(JavaThread::scopedValueCache_offset()),
935 };
936
937 for (size_t i = 0; i < sizeof offsets / sizeof offsets[0]; i++) {
938 if (offset == offsets[i]) {
1044 intptr_t ld_off = 0;
1045 AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
1046 Node* ac = find_previous_arraycopy(phase, ld_alloc, st, true);
1047 if (ac != nullptr) {
1048 assert(ac->is_ArrayCopy(), "what kind of node can this be?");
1049
1050 Node* mem = ac->in(TypeFunc::Memory);
1051 Node* ctl = ac->in(0);
1052 Node* src = ac->in(ArrayCopyNode::Src);
1053
1054 if (!ac->as_ArrayCopy()->is_clonebasic() && !phase->type(src)->isa_aryptr()) {
1055 return nullptr;
1056 }
1057
1058 // load depends on the tests that validate the arraycopy
1059 LoadNode* ld = clone_pinned();
1060 Node* addp = in(MemNode::Address)->clone();
1061 if (ac->as_ArrayCopy()->is_clonebasic()) {
1062 assert(ld_alloc != nullptr, "need an alloc");
1063 assert(addp->is_AddP(), "address must be addp");
1064 assert(addp->in(AddPNode::Base) == ac->in(ArrayCopyNode::Dest), "strange pattern");
1065 assert(addp->in(AddPNode::Address) == ac->in(ArrayCopyNode::Dest), "strange pattern");
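        // A basic clone copies the whole object, so the load can read the same offset from the source object instead of the destination.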
1066 addp->set_req(AddPNode::Base, src);
1067 addp->set_req(AddPNode::Address, src);
1068 } else {
1069 assert(ac->as_ArrayCopy()->is_arraycopy_validated() ||
1070 ac->as_ArrayCopy()->is_copyof_validated() ||
1071 ac->as_ArrayCopy()->is_copyofrange_validated(), "only supported cases");
1072 assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
1073 addp->set_req(AddPNode::Base, src);
1074 addp->set_req(AddPNode::Address, src);
1075
1076 const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
1077        BasicType ary_elem = ary_t->elem()->array_element_basic_type();
1078 if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;
1079
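        // The matching source element sits at an index shifted by (SrcPos - DestPos); compute that delta and scale it by the element size.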
1080 uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
1081 uint shift = exact_log2(type2aelembytes(ary_elem));
1082
1083 Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
1084 #ifdef _LP64
1085 diff = phase->transform(new ConvI2LNode(diff));
1229 }
1230
1231 // A load from an initialization barrier can match a captured store.
1232 if (st->is_Proj() && st->in(0)->is_Initialize()) {
1233 InitializeNode* init = st->in(0)->as_Initialize();
1234 AllocateNode* alloc = init->allocation();
1235 if ((alloc != nullptr) && (alloc == ld_alloc)) {
1236 // examine a captured store value
1237 st = init->find_captured_store(ld_off, memory_size(), phase);
1238 if (st != nullptr) {
1239 continue; // take one more trip around
1240 }
1241 }
1242 }
1243
1244   // A load of the boxed value from the result of a valueOf() call is that call's input parameter.
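   // e.g. a load of Integer.value from the result of Integer.valueOf(x) is replaced by x itself.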
1245 if (this->is_Load() && ld_adr->is_AddP() &&
1246 (tp != nullptr) && tp->is_ptr_to_boxed_value()) {
1247 intptr_t ignore = 0;
1248 Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore);
1249 if (base != nullptr && base->is_Proj() &&
1250 base->as_Proj()->_con == TypeFunc::Parms &&
1251 base->in(0)->is_CallStaticJava() &&
1252 base->in(0)->as_CallStaticJava()->is_boxing_method()) {
1253 return base->in(0)->in(TypeFunc::Parms);
1254 }
1255 }
1256
1257 break;
1258 }
1259
1260 return nullptr;
1261 }
1262
1263 //----------------------is_instance_field_load_with_local_phi------------------
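// True when this load reads through a memory Phi merged at 'ctrl' and its address is a known instance field or boxed value.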
1264 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
1265 if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
1266 in(Address)->is_AddP() ) {
1267 const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
1268 // Only instances and boxed values.
2507 //------------------------------Identity---------------------------------------
2508 // To clean up reflective code, simplify k.java_mirror.as_klass to plain k.
2509 // Also feed through the klass in Allocate(...klass...)._klass.
2510 Node* LoadKlassNode::Identity(PhaseGVN* phase) {
2511 return klass_identity_common(phase);
2512 }
2513
2514 Node* LoadNode::klass_identity_common(PhaseGVN* phase) {
2515 Node* x = LoadNode::Identity(phase);
2516 if (x != this) return x;
2517
2518 // Take apart the address into an oop and offset.
2519 // Return 'this' if we cannot.
2520 Node* adr = in(MemNode::Address);
2521 intptr_t offset = 0;
2522 Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
2523 if (base == nullptr) return this;
2524 const TypeOopPtr* toop = phase->type(adr)->isa_oopptr();
2525 if (toop == nullptr) return this;
2526
2527 // We can fetch the klass directly through an AllocateNode.
2528 // This works even if the klass is not constant (clone or newArray).
2529 if (offset == oopDesc::klass_offset_in_bytes()) {
2530 Node* allocated_klass = AllocateNode::Ideal_klass(base, phase);
2531 if (allocated_klass != nullptr) {
2532 return allocated_klass;
2533 }
2534 }
2535
2536 // Simplify k.java_mirror.as_klass to plain k, where k is a Klass*.
2537 // See inline_native_Class_query for occurrences of these patterns.
2538 // Java Example: x.getClass().isAssignableFrom(y)
2539 //
2540 // This improves reflective code, often making the Class
2541 // mirror go completely dead. (Current exception: Class
2542 // mirrors may appear in debug info, but we could clean them out by
2543 // introducing a new debug info operator for Klass.java_mirror).
2544
2545 if (toop->isa_instptr() && toop->is_instptr()->instance_klass() == phase->C->env()->Class_klass()
2546 && offset == java_lang_Class::klass_offset()) {
|