
src/hotspot/share/opto/memnode.cpp (old)


  89   // fake the missing field
  90   const TypePtr* _adr_type = nullptr;
  91   if (in(Address) != nullptr)
  92     _adr_type = in(Address)->bottom_type()->isa_ptr();
  93 #endif
  94   dump_adr_type(this, _adr_type, st);
  95 
  96   Compile* C = Compile::current();
  97   if (C->alias_type(_adr_type)->is_volatile()) {
  98     st->print(" Volatile!");
  99   }
 100   if (_unaligned_access) {
 101     st->print(" unaligned");
 102   }
 103   if (_mismatched_access) {
 104     st->print(" mismatched");
 105   }
 106   if (_unsafe_access) {
 107     st->print(" unsafe");
 108   }
 108   }
 109 }
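For orientation, a hedged sketch of the suffix these dump helpers append to a node's printout (hypothetical values; the exact type text comes from the TypePtr's own dump_on):

     @rawptr:BotPTR, idx=Raw; unaligned unsafe

" Volatile!" and " mismatched" appear in the same position when the corresponding flags are set.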
 110 
 111 void MemNode::dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st) {
 112   st->print(" @");
 113   if (adr_type == nullptr) {
 114     st->print("null");
 115   } else {
 116     adr_type->dump_on(st);
 117     Compile* C = Compile::current();
 118     Compile::AliasType* atp = nullptr;
 119     if (C->have_alias_type(adr_type))  atp = C->alias_type(adr_type);
 120     if (atp == nullptr)
 121       st->print(", idx=?\?;");
 122     else if (atp->index() == Compile::AliasIdxBot)
 123       st->print(", idx=Bot;");
 124     else if (atp->index() == Compile::AliasIdxTop)
 125       st->print(", idx=Top;");
 126     else if (atp->index() == Compile::AliasIdxRaw)
 127       st->print(", idx=Raw;");
 128     else {

 633             return ac;
 634           }
 635         }
 636       }
 637     }
 638   }
 639   return nullptr;
 640 }
 641 
 642 ArrayCopyNode* MemNode::find_array_copy_clone(Node* ld_alloc, Node* mem) const {
 643   if (mem->is_Proj() && mem->in(0) != nullptr && (mem->in(0)->Opcode() == Op_MemBarStoreStore ||
 644                                                mem->in(0)->Opcode() == Op_MemBarCPUOrder)) {
 645     if (ld_alloc != nullptr) {
 646       // Check if there is an array copy for a clone
 647       Node* mb = mem->in(0);
 648       ArrayCopyNode* ac = nullptr;
 649       if (mb->in(0) != nullptr && mb->in(0)->is_Proj() &&
 650           mb->in(0)->in(0) != nullptr && mb->in(0)->in(0)->is_ArrayCopy()) {
 651         ac = mb->in(0)->in(0)->as_ArrayCopy();
 652       } else {
 653         // Step over GC barrier when ReduceInitialCardMarks is disabled
 654         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 655         Node* control_proj_ac = bs->step_over_gc_barrier(mb->in(0));
 656 
 657         if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
 658           ac = control_proj_ac->in(0)->as_ArrayCopy();
 659         }
 660       }
 661 
 662       if (ac != nullptr && ac->is_clonebasic()) {
 663         AllocateNode* alloc = AllocateNode::Ideal_allocation(ac->in(ArrayCopyNode::Dest));
 664         if (alloc != nullptr && alloc == ld_alloc) {
 665           return ac;
 666         }
 667       }
 668     }
 669   }
 670   return nullptr;
 671 }
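As a hedged Java-level illustration (hypothetical class, not part of this change): an Object.clone() of a simple instance is what C2 expands into the Allocate plus clonebasic ArrayCopy behind the MemBarStoreStore/MemBarCPUOrder projection that this function matches:

    // Hypothetical example: q is a fresh clone; C2 models the copy as an
    // ArrayCopy node with is_clonebasic() true, fenced by a MemBarStoreStore.
    class Point implements Cloneable {
        int x;
        Point(int x) { this.x = x; }
        static int test(Point p) throws CloneNotSupportedException {
            Point q = (Point) p.clone();  // Allocate + clonebasic ArrayCopy
            return q.x;                   // load may be rerouted to p.x
        }
    }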
 672 
 673 // The logic for reordering loads and stores uses four steps:
 674 // (a) Walk carefully past stores and initializations which we
 675 //     can prove are independent of this load.
 676 // (b) Observe that the next memory state makes an exact match

1041   intptr_t ld_off = 0;
1042   AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
1043   Node* ac = find_previous_arraycopy(phase, ld_alloc, st, true);
1044   if (ac != nullptr) {
1045     assert(ac->is_ArrayCopy(), "what kind of node can this be?");
1046 
1047     Node* mem = ac->in(TypeFunc::Memory);
1048     Node* ctl = ac->in(0);
1049     Node* src = ac->in(ArrayCopyNode::Src);
1050 
1051     if (!ac->as_ArrayCopy()->is_clonebasic() && !phase->type(src)->isa_aryptr()) {
1052       return nullptr;
1053     }
1054 
1055     // load depends on the tests that validate the arraycopy
1056     LoadNode* ld = clone_pinned();
1057     Node* addp = in(MemNode::Address)->clone();
1058     if (ac->as_ArrayCopy()->is_clonebasic()) {
1059       assert(ld_alloc != nullptr, "need an alloc");
1060       assert(addp->is_AddP(), "address must be addp");
1061       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1062       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Base)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1063       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Address)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1064       addp->set_req(AddPNode::Base, src);
1065       addp->set_req(AddPNode::Address, src);
1066     } else {
1067       assert(ac->as_ArrayCopy()->is_arraycopy_validated() ||
1068              ac->as_ArrayCopy()->is_copyof_validated() ||
1069              ac->as_ArrayCopy()->is_copyofrange_validated(), "only supported cases");
1070       assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
1071       addp->set_req(AddPNode::Base, src);
1072       addp->set_req(AddPNode::Address, src);
1073 
1074       const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
1075       BasicType ary_elem = ary_t->isa_aryptr()->elem()->array_element_basic_type();
1076       if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;
1077 
1078       uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
1079       uint shift  = exact_log2(type2aelembytes(ary_elem));
1080 
1081       Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
1082 #ifdef _LP64
1083       diff = phase->transform(new ConvI2LNode(diff));
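As a hedged worked example of the rebasing arithmetic above, assuming a 64-bit VM with compressed oops where an int[] has a 16-byte element base:

    header = arrayOopDesc::base_offset_in_bytes(T_INT) = 16
    shift  = exact_log2(type2aelembytes(T_INT))        = 2    (4-byte elements)

    dest[DestPos + k] holds src[SrcPos + k], so a load at dest offset
    header + (i << shift) is redirected to src offset
    header + ((i + diff) << shift), where diff = SrcPos - DestPos.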

1227     }
1228 
1229     // A load from an initialization barrier can match a captured store.
1230     if (st->is_Proj() && st->in(0)->is_Initialize()) {
1231       InitializeNode* init = st->in(0)->as_Initialize();
1232       AllocateNode* alloc = init->allocation();
1233       if ((alloc != nullptr) && (alloc == ld_alloc)) {
1234         // examine a captured store value
1235         st = init->find_captured_store(ld_off, memory_size(), phase);
1236         if (st != nullptr) {
1237           continue;             // take one more trip around
1238         }
1239       }
1240     }
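A hedged Java-level illustration of the captured-store case (hypothetical class): stores issued by a constructor are captured by the allocation's InitializeNode, so a load from the fresh object can be answered by find_captured_store:

    // Hypothetical example: the constructor's store to this.x is captured
    // by the InitializeNode, so the load below may fold to the constant 3.
    class Box {
        int x;
        Box(int x) { this.x = x; }
        static int test() {
            return new Box(3).x;  // load matched against the captured store
        }
    }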
1241 
 1242     // A load of the boxed value from the result of a valueOf() call is the call's input parameter.
1243     if (this->is_Load() && ld_adr->is_AddP() &&
1244         (tp != nullptr) && tp->is_ptr_to_boxed_value()) {
1245       intptr_t ignore = 0;
1246       Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore);
1247       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1248       base = bs->step_over_gc_barrier(base);
1249       if (base != nullptr && base->is_Proj() &&
1250           base->as_Proj()->_con == TypeFunc::Parms &&
1251           base->in(0)->is_CallStaticJava() &&
1252           base->in(0)->as_CallStaticJava()->is_boxing_method()) {
1253         return base->in(0)->in(TypeFunc::Parms);
1254       }
1255     }
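A hedged illustration of the boxed-value case (hypothetical method): when the load's base is the Parms projection of a CallStaticJava to a boxing method, the loaded value is simply the call's argument, so the load can fold to it:

    // Hypothetical example: with boxing elimination enabled, the unbox
    // load may be replaced directly by x, the valueOf() input parameter.
    static int roundTrip(int x) {
        Integer boxed = Integer.valueOf(x);  // is_boxing_method() call
        return boxed.intValue();             // load folds to x
    }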
1256 
1257     break;
1258   }
1259 
1260   return nullptr;
1261 }
1262 
1263 //----------------------is_instance_field_load_with_local_phi------------------
1264 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
1265   if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
1266       in(Address)->is_AddP() ) {
1267     const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
1268     // Only instances and boxed values.

2508 //------------------------------Identity---------------------------------------
2509 // To clean up reflective code, simplify k.java_mirror.as_klass to plain k.
2510 // Also feed through the klass in Allocate(...klass...)._klass.
2511 Node* LoadKlassNode::Identity(PhaseGVN* phase) {
2512   return klass_identity_common(phase);
2513 }
2514 
2515 Node* LoadNode::klass_identity_common(PhaseGVN* phase) {
2516   Node* x = LoadNode::Identity(phase);
2517   if (x != this)  return x;
2518 
2519   // Take apart the address into an oop and offset.
2520   // Return 'this' if we cannot.
2521   Node*    adr    = in(MemNode::Address);
2522   intptr_t offset = 0;
2523   Node*    base   = AddPNode::Ideal_base_and_offset(adr, phase, offset);
2524   if (base == nullptr)     return this;
2525   const TypeOopPtr* toop = phase->type(adr)->isa_oopptr();
2526   if (toop == nullptr)     return this;
2527 
2528   // Step over potential GC barrier for OopHandle resolve
2529   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2530   if (bs->is_gc_barrier_node(base)) {
2531     base = bs->step_over_gc_barrier(base);
2532   }
2533 
2534   // We can fetch the klass directly through an AllocateNode.
2535   // This works even if the klass is not constant (clone or newArray).
2536   if (offset == oopDesc::klass_offset_in_bytes()) {
2537     Node* allocated_klass = AllocateNode::Ideal_klass(base, phase);
2538     if (allocated_klass != nullptr) {
2539       return allocated_klass;
2540     }
2541   }
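A hedged illustration of the AllocateNode case (hypothetical method): the klass input of a fresh allocation is known, so the klass load behind getClass() can fold even though the loaded pointer is not otherwise constant:

    // Hypothetical example: the klass of the new object comes straight from
    // the AllocateNode, so this comparison may constant-fold to true.
    static boolean alwaysTrue() {
        Object o = new Object();
        return o.getClass() == Object.class;
    }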
2542 
2543   // Simplify k.java_mirror.as_klass to plain k, where k is a Klass*.
2544   // See inline_native_Class_query for occurrences of these patterns.
2545   // Java Example:  x.getClass().isAssignableFrom(y)
2546   //
2547   // This improves reflective code, often making the Class
2548   // mirror go completely dead.  (Current exception:  Class
2549   // mirrors may appear in debug info, but we could clean them out by
2550   // introducing a new debug info operator for Klass.java_mirror).
2551 
2552   if (toop->isa_instptr() && toop->is_instptr()->instance_klass() == phase->C->env()->Class_klass()
2553       && offset == java_lang_Class::klass_offset()) {

src/hotspot/share/opto/memnode.cpp (new)

  89   // fake the missing field
  90   const TypePtr* _adr_type = nullptr;
  91   if (in(Address) != nullptr)
  92     _adr_type = in(Address)->bottom_type()->isa_ptr();
  93 #endif
  94   dump_adr_type(this, _adr_type, st);
  95 
  96   Compile* C = Compile::current();
  97   if (C->alias_type(_adr_type)->is_volatile()) {
  98     st->print(" Volatile!");
  99   }
 100   if (_unaligned_access) {
 101     st->print(" unaligned");
 102   }
 103   if (_mismatched_access) {
 104     st->print(" mismatched");
 105   }
 106   if (_unsafe_access) {
 107     st->print(" unsafe");
 108   }
 109   st->print(" barrier: %u", _barrier_data);
 110 }
 111 
 112 void MemNode::dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st) {
 113   st->print(" @");
 114   if (adr_type == nullptr) {
 115     st->print("null");
 116   } else {
 117     adr_type->dump_on(st);
 118     Compile* C = Compile::current();
 119     Compile::AliasType* atp = nullptr;
 120     if (C->have_alias_type(adr_type))  atp = C->alias_type(adr_type);
 121     if (atp == nullptr)
 122       st->print(", idx=?\?;");
 123     else if (atp->index() == Compile::AliasIdxBot)
 124       st->print(", idx=Bot;");
 125     else if (atp->index() == Compile::AliasIdxTop)
 126       st->print(", idx=Top;");
 127     else if (atp->index() == Compile::AliasIdxRaw)
 128       st->print(", idx=Raw;");
 129     else {

 634             return ac;
 635           }
 636         }
 637       }
 638     }
 639   }
 640   return nullptr;
 641 }
 642 
 643 ArrayCopyNode* MemNode::find_array_copy_clone(Node* ld_alloc, Node* mem) const {
 644   if (mem->is_Proj() && mem->in(0) != nullptr && (mem->in(0)->Opcode() == Op_MemBarStoreStore ||
 645                                                mem->in(0)->Opcode() == Op_MemBarCPUOrder)) {
 646     if (ld_alloc != nullptr) {
 647       // Check if there is an array copy for a clone
 648       Node* mb = mem->in(0);
 649       ArrayCopyNode* ac = nullptr;
 650       if (mb->in(0) != nullptr && mb->in(0)->is_Proj() &&
 651           mb->in(0)->in(0) != nullptr && mb->in(0)->in(0)->is_ArrayCopy()) {
 652         ac = mb->in(0)->in(0)->as_ArrayCopy();
 653       } else {
 654         Node* control_proj_ac = mb->in(0);
 655         if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
 656           ac = control_proj_ac->in(0)->as_ArrayCopy();
 657         }
 658       }
 659 
 660       if (ac != nullptr && ac->is_clonebasic()) {
 661         AllocateNode* alloc = AllocateNode::Ideal_allocation(ac->in(ArrayCopyNode::Dest));
 662         if (alloc != nullptr && alloc == ld_alloc) {
 663           return ac;
 664         }
 665       }
 666     }
 667   }
 668   return nullptr;
 669 }
 670 
 671 // The logic for reordering loads and stores uses four steps:
 672 // (a) Walk carefully past stores and initializations which we
 673 //     can prove are independent of this load.
 674 // (b) Observe that the next memory state makes an exact match

1039   intptr_t ld_off = 0;
1040   AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
1041   Node* ac = find_previous_arraycopy(phase, ld_alloc, st, true);
1042   if (ac != nullptr) {
1043     assert(ac->is_ArrayCopy(), "what kind of node can this be?");
1044 
1045     Node* mem = ac->in(TypeFunc::Memory);
1046     Node* ctl = ac->in(0);
1047     Node* src = ac->in(ArrayCopyNode::Src);
1048 
1049     if (!ac->as_ArrayCopy()->is_clonebasic() && !phase->type(src)->isa_aryptr()) {
1050       return nullptr;
1051     }
1052 
1053     // load depends on the tests that validate the arraycopy
1054     LoadNode* ld = clone_pinned();
1055     Node* addp = in(MemNode::Address)->clone();
1056     if (ac->as_ArrayCopy()->is_clonebasic()) {
1057       assert(ld_alloc != nullptr, "need an alloc");
1058       assert(addp->is_AddP(), "address must be addp");
1059       assert(addp->in(AddPNode::Base) == ac->in(ArrayCopyNode::Dest), "strange pattern");
1060       assert(addp->in(AddPNode::Address) == ac->in(ArrayCopyNode::Dest), "strange pattern");
1061       addp->set_req(AddPNode::Base, src);
1062       addp->set_req(AddPNode::Address, src);
1063     } else {
1064       assert(ac->as_ArrayCopy()->is_arraycopy_validated() ||
1065              ac->as_ArrayCopy()->is_copyof_validated() ||
1066              ac->as_ArrayCopy()->is_copyofrange_validated(), "only supported cases");
1067       assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
1068       addp->set_req(AddPNode::Base, src);
1069       addp->set_req(AddPNode::Address, src);
1070 
1071       const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
1072       BasicType ary_elem = ary_t->isa_aryptr()->elem()->array_element_basic_type();
1073       if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;
1074 
1075       uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
1076       uint shift  = exact_log2(type2aelembytes(ary_elem));
1077 
1078       Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
1079 #ifdef _LP64
1080       diff = phase->transform(new ConvI2LNode(diff));

1224     }
1225 
1226     // A load from an initialization barrier can match a captured store.
1227     if (st->is_Proj() && st->in(0)->is_Initialize()) {
1228       InitializeNode* init = st->in(0)->as_Initialize();
1229       AllocateNode* alloc = init->allocation();
1230       if ((alloc != nullptr) && (alloc == ld_alloc)) {
1231         // examine a captured store value
1232         st = init->find_captured_store(ld_off, memory_size(), phase);
1233         if (st != nullptr) {
1234           continue;             // take one more trip around
1235         }
1236       }
1237     }
1238 
 1239     // A load of the boxed value from the result of a valueOf() call is the call's input parameter.
1240     if (this->is_Load() && ld_adr->is_AddP() &&
1241         (tp != nullptr) && tp->is_ptr_to_boxed_value()) {
1242       intptr_t ignore = 0;
1243       Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore);
1244       if (base != nullptr && base->is_Proj() &&
1245           base->as_Proj()->_con == TypeFunc::Parms &&
1246           base->in(0)->is_CallStaticJava() &&
1247           base->in(0)->as_CallStaticJava()->is_boxing_method()) {
1248         return base->in(0)->in(TypeFunc::Parms);
1249       }
1250     }
1251 
1252     break;
1253   }
1254 
1255   return nullptr;
1256 }
1257 
1258 //----------------------is_instance_field_load_with_local_phi------------------
1259 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
1260   if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
1261       in(Address)->is_AddP() ) {
1262     const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
1263     // Only instances and boxed values.

2503 //------------------------------Identity---------------------------------------
2504 // To clean up reflective code, simplify k.java_mirror.as_klass to plain k.
2505 // Also feed through the klass in Allocate(...klass...)._klass.
2506 Node* LoadKlassNode::Identity(PhaseGVN* phase) {
2507   return klass_identity_common(phase);
2508 }
2509 
2510 Node* LoadNode::klass_identity_common(PhaseGVN* phase) {
2511   Node* x = LoadNode::Identity(phase);
2512   if (x != this)  return x;
2513 
2514   // Take apart the address into an oop and offset.
2515   // Return 'this' if we cannot.
2516   Node*    adr    = in(MemNode::Address);
2517   intptr_t offset = 0;
2518   Node*    base   = AddPNode::Ideal_base_and_offset(adr, phase, offset);
2519   if (base == nullptr)     return this;
2520   const TypeOopPtr* toop = phase->type(adr)->isa_oopptr();
2521   if (toop == nullptr)     return this;
2522 
2523   // We can fetch the klass directly through an AllocateNode.
2524   // This works even if the klass is not constant (clone or newArray).
2525   if (offset == oopDesc::klass_offset_in_bytes()) {
2526     Node* allocated_klass = AllocateNode::Ideal_klass(base, phase);
2527     if (allocated_klass != nullptr) {
2528       return allocated_klass;
2529     }
2530   }
2531 
2532   // Simplify k.java_mirror.as_klass to plain k, where k is a Klass*.
2533   // See inline_native_Class_query for occurrences of these patterns.
2534   // Java Example:  x.getClass().isAssignableFrom(y)
2535   //
2536   // This improves reflective code, often making the Class
2537   // mirror go completely dead.  (Current exception:  Class
2538   // mirrors may appear in debug info, but we could clean them out by
2539   // introducing a new debug info operator for Klass.java_mirror).
2540 
2541   if (toop->isa_instptr() && toop->is_instptr()->instance_klass() == phase->C->env()->Class_klass()
2542       && offset == java_lang_Class::klass_offset()) {