src/hotspot/share/opto/memnode.cpp (new version)

   1 /*
   2  * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "memory/allocation.inline.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #include "oops/objArrayKlass.hpp"
  33 #include "opto/addnode.hpp"
  34 #include "opto/arraycopynode.hpp"
  35 #include "opto/cfgnode.hpp"
  36 #include "opto/compile.hpp"
  37 #include "opto/connode.hpp"
  38 #include "opto/convertnode.hpp"
  39 #include "opto/loopnode.hpp"
  40 #include "opto/machnode.hpp"
  41 #include "opto/matcher.hpp"
  42 #include "opto/memnode.hpp"
  43 #include "opto/mulnode.hpp"
  44 #include "opto/narrowptrnode.hpp"
  45 #include "opto/phaseX.hpp"
  46 #include "opto/regmask.hpp"
  47 #include "opto/rootnode.hpp"
  48 #include "utilities/align.hpp"
  49 #include "utilities/copy.hpp"
  50 #include "utilities/macros.hpp"
  51 #include "utilities/vmError.hpp"
  52 #if INCLUDE_ZGC
  53 #include "gc/z/c2/zBarrierSetC2.hpp"
  54 #endif
  55 
  56 // Portions of code courtesy of Clifford Click
  57 
  58 // Optimization - Graph Style
  59 
  60 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st);
  61 
  62 //=============================================================================
  63 uint MemNode::size_of() const { return sizeof(*this); }
  64 
  65 const TypePtr *MemNode::adr_type() const {
  66   Node* adr = in(Address);
  67   if (adr == NULL)  return NULL; // node is dead


 312       phase->is_IterGVN()->_worklist.push(this);
 313       return NodeSentinel; // caller will return NULL
 314     }
 315   }
 316   // Ignore if memory is dead, or self-loop
 317   Node *mem = in(MemNode::Memory);
 318   if (phase->type( mem ) == Type::TOP) return NodeSentinel; // caller will return NULL
 319   assert(mem != this, "dead loop in MemNode::Ideal");
 320 
 321   if (can_reshape && igvn != NULL && igvn->_worklist.member(mem)) {
 322     // This memory slice may be dead.
 323     // Delay this mem node transformation until the memory is processed.
 324     phase->is_IterGVN()->_worklist.push(this);
 325     return NodeSentinel; // caller will return NULL
 326   }
 327 
 328   Node *address = in(MemNode::Address);
 329   const Type *t_adr = phase->type(address);
 330   if (t_adr == Type::TOP)              return NodeSentinel; // caller will return NULL
 331 
 332   if (can_reshape && is_unsafe_access() && (t_adr == TypePtr::NULL_PTR)) {
 333     // Unsafe off-heap access with zero address. Remove access and other control users
 334     // to not confuse optimizations and add a HaltNode to fail if this is ever executed.
 335     assert(ctl != NULL, "unsafe accesses should be control dependent");
 336     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
 337       Node* u = ctl->fast_out(i);
 338       if (u != ctl) {
 339         igvn->rehash_node_delayed(u);
 340         int nb = u->replace_edge(ctl, phase->C->top());
 341         --i, imax -= nb;
 342       }
 343     }
 344     Node* frame = igvn->transform(new ParmNode(phase->C->start(), TypeFunc::FramePtr));
 345     Node* halt = igvn->transform(new HaltNode(ctl, frame));
 346     phase->C->root()->add_req(halt);
 347     return this;
 348   }
 349 
 350   if (can_reshape && igvn != NULL &&
 351       (igvn->_worklist.member(address) ||
 352        (igvn->_worklist.size() > 0 && t_adr != adr_type())) ) {
 353     // The address's base and type may change when the address is processed.
 354     // Delay this mem node transformation until the address is processed.
 355     phase->is_IterGVN()->_worklist.push(this);
 356     return NodeSentinel; // caller will return NULL
 357   }
 358 
 359   // Do NOT remove or optimize the next lines: ensure a new alias index
 360   // is allocated for an oop pointer type before Escape Analysis.
 361   // Note: C++ will not remove it since the call has side effect.
 362   if (t_adr->isa_oopptr()) {
 363     int alias_idx = phase->C->get_alias_index(t_adr->is_ptr());
 364   }
 365 
 366   Node* base = NULL;
 367   if (address->is_AddP()) {
 368     base = address->in(AddPNode::Base);
 369   }
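
Worth spelling out is the delay-and-retry protocol this hunk uses repeatedly: whenever an input (the memory slice, the address) may still change because it sits on the IterGVN worklist, the node pushes itself back onto the worklist and returns NodeSentinel, which the caller turns into "no transformation this pass". Below is a minimal standalone sketch of that protocol; every type and name in it is invented for illustration, and none of it is HotSpot code.

    #include <cstdio>
    #include <deque>
    #include <set>

    struct Node {
      int   id;
      Node* input      = nullptr;
      bool  simplified = false;
    };

    // Stand-in for C2's NodeSentinel: a non-null, never-dereferenced marker
    // meaning "no result yet, reprocess me later".
    static Node* const kSentinel = reinterpret_cast<Node*>(1);

    struct Worklist {
      std::deque<Node*> queue;
      std::set<Node*>   members;
      void  push(Node* n)         { if (members.insert(n).second) queue.push_back(n); }
      bool  member(Node* n) const { return members.count(n) != 0; }
      Node* pop()   { Node* n = queue.front(); queue.pop_front(); members.erase(n); return n; }
      bool  empty() const { return queue.empty(); }
    };

    // Analogue of the checks above: refuse to transform while an input is
    // still pending, otherwise perform a (trivial) rewrite.
    Node* ideal(Node* n, Worklist& wl) {
      if (n->input != nullptr && wl.member(n->input)) {
        wl.push(n);        // delay this transformation until the input settles
        return kSentinel;  // caller interprets this as "return NULL for now"
      }
      n->simplified = true;
      return n;
    }

    int main() {
      Node a{1}, b{2};
      b.input = &a;
      Worklist wl;
      wl.push(&b);  // b is queued before its input a, so it gets delayed once
      wl.push(&a);
      while (!wl.empty()) {
        Node* n = wl.pop();
        if (ideal(n, wl) == kSentinel) continue;  // n was re-queued, revisit later
        std::printf("node %d simplified\n", n->id);
      }
      return 0;
    }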


 910   return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
 911 }
 912 
 913 static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
 914   if ((atp != NULL) && (atp->index() >= Compile::AliasIdxRaw)) {
 915     bool non_volatile = (atp->field() != NULL) && !atp->field()->is_volatile();
 916     bool is_stable_ary = FoldStableValues &&
 917                          (tp != NULL) && (tp->isa_aryptr() != NULL) &&
 918                          tp->isa_aryptr()->is_stable();
 919 
 920     return (eliminate_boxing && non_volatile) || is_stable_ary;
 921   }
 922 
 923   return false;
 924 }
 925 
 926 // Is the value loaded previously stored by an arraycopy? If so return
 927 // a load node that reads from the source array so we may be able to
 928 // optimize out the ArrayCopy node later.
 929 Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const {
 930   Node* ld_adr = in(MemNode::Address);
 931   intptr_t ld_off = 0;
 932   AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
 933   Node* ac = find_previous_arraycopy(phase, ld_alloc, st, true);
 934   if (ac != NULL) {
 935     assert(ac->is_ArrayCopy(), "what kind of node can this be?");
 936 
 937     Node* mem = ac->in(TypeFunc::Memory);
 938     Node* ctl = ac->in(0);
 939     Node* src = ac->in(ArrayCopyNode::Src);
 940 
 941     if (!ac->as_ArrayCopy()->is_clonebasic() && !phase->type(src)->isa_aryptr()) {
 942       return NULL;
 943     }
 944 
 945     LoadNode* ld = clone()->as_Load();
 946     Node* addp = in(MemNode::Address)->clone();
 947     if (ac->as_ArrayCopy()->is_clonebasic()) {
 948       assert(ld_alloc != NULL, "need an alloc");
 949       assert(addp->is_AddP(), "address must be addp");
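
The idea behind can_see_arraycopy_value(), in miniature: if the destination was filled from the source by a copy and nothing has overwritten the destination since, a read of dst[i] can be redirected to src[i]; once no reads of the destination remain, the ArrayCopy itself becomes dead code. A hand-done illustration of the before/after shapes, using plain arrays and memcpy rather than anything from HotSpot:

    #include <cassert>
    #include <cstring>

    // Before: the load reads from the destination of the copy.
    int before(const int* src, int* dst, int n, int i) {
      std::memcpy(dst, src, n * sizeof(int));  // the "ArrayCopy"
      return dst[i];                           // load from the copy's destination
    }

    // After: the load is redirected to the copy's source. If this was the
    // only use of dst, the copy (and dst itself) can be removed entirely.
    int after(const int* src, int i) {
      return src[i];
    }

    int main() {
      int src[4] = {10, 20, 30, 40};
      int dst[4];
      assert(before(src, dst, 4, 2) == after(src, 2));
      return 0;
    }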


2805   // If extra input is TOP ==> the result is TOP
2806   t = phase->type( in(MemNode::OopStore) );
2807   if( t == Type::TOP ) return Type::TOP;
2808 
2809   return StoreNode::Value( phase );
2810 }
2811 
2812 
2813 //=============================================================================
2814 //----------------------------------SCMemProjNode------------------------------
2815 const Type* SCMemProjNode::Value(PhaseGVN* phase) const
2816 {
2817   return bottom_type();
2818 }
2819 
2820 //=============================================================================
2821 //----------------------------------LoadStoreNode------------------------------
2822 LoadStoreNode::LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required )
2823   : Node(required),
2824     _type(rt),
2825     _adr_type(at),
2826     _has_barrier(false)
2827 {
2828   init_req(MemNode::Control, c  );
2829   init_req(MemNode::Memory , mem);
2830   init_req(MemNode::Address, adr);
2831   init_req(MemNode::ValueIn, val);
2832   init_class_id(Class_LoadStore);
2833 }
2834 
2835 uint LoadStoreNode::ideal_reg() const {
2836   return _type->ideal_reg();
2837 }
2838 
2839 bool LoadStoreNode::result_not_used() const {
2840   for( DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++ ) {
2841     Node *x = fast_out(i);
2842     if (x->Opcode() == Op_SCMemProj) continue;
2843     return false;
2844   }
2845   return true;
2846 }
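
result_not_used() above scans the def-use edges of the load-store node and reports true only when every user is the SCMemProj memory projection, i.e. nobody consumes the produced value. Knowing the result is dead can let the matcher pick a cheaper instruction form on some platforms (for example, an atomic add whose old value is discarded). A standalone sketch of the same scan, with illustrative types that are not HotSpot's:

    #include <cassert>
    #include <vector>

    enum class Op { SCMemProj, AddI, CompareAndSwapI };

    struct Node {
      Op op;
      std::vector<Node*> outs;  // def-use edges: the nodes that consume this one
    };

    // Mirror of LoadStoreNode::result_not_used(): only the memory projection
    // may appear among the users, otherwise the value is live.
    bool result_not_used(const Node& n) {
      for (Node* use : n.outs) {
        if (use->op == Op::SCMemProj) continue;  // memory users do not count
        return false;                            // something reads the value
      }
      return true;
    }

    int main() {
      Node proj{Op::SCMemProj, {}};
      Node add {Op::AddI,      {}};
      Node cas1{Op::CompareAndSwapI, {&proj}};        // result is dead
      Node cas2{Op::CompareAndSwapI, {&proj, &add}};  // result feeds an AddI
      assert(result_not_used(cas1));
      assert(!result_not_used(cas2));
      return 0;
    }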


3099   if (trailing_store() || trailing_load_store()) {
3100     MemBarNode* leading = leading_membar();
3101     if (leading != NULL) {
3102       assert(leading->trailing_membar() == this, "inconsistent leading/trailing membars");
3103       leading->remove(igvn);
3104     }
3105   }
3106   igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
3107   igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
3108 }
3109 
3110 //------------------------------Ideal------------------------------------------
3111 // Return a node which is more "ideal" than the current node.  Strip out
3112 // control copies
3113 Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3114   if (remove_dead_region(phase, can_reshape)) return this;
3115   // Don't bother trying to transform a dead node
3116   if (in(0) && in(0)->is_top()) {
3117     return NULL;
3118   }
3119 
3120   bool progress = false;
3121   // Eliminate volatile MemBars for scalar replaced objects.
3122   if (can_reshape && req() == (Precedent+1)) {
3123     bool eliminate = false;
3124     int opc = Opcode();
3125     if ((opc == Op_MemBarAcquire || opc == Op_MemBarVolatile)) {
3126       // Volatile field loads and stores.
3127       Node* my_mem = in(MemBarNode::Precedent);
 3128       // The MemBarAcquire may keep an unused LoadNode alive through the Precedent edge
3129       if ((my_mem != NULL) && (opc == Op_MemBarAcquire) && (my_mem->outcnt() == 1)) {
3130         // if the Precedent is a decodeN and its input (a Load) is used at more than one place,
3131         // replace this Precedent (decodeN) with the Load instead.
3132         if ((my_mem->Opcode() == Op_DecodeN) && (my_mem->in(1)->outcnt() > 1))  {
3133           Node* load_node = my_mem->in(1);
3134           set_req(MemBarNode::Precedent, load_node);
3135           phase->is_IterGVN()->_worklist.push(my_mem);
3136           my_mem = load_node;
3137         } else {
3138           assert(my_mem->unique_out() == this, "sanity");
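
The elimination above rests on a simple observation: acquire/volatile barriers only order accesses that another thread could observe, so once escape analysis has proven an object thread-local (and scalar replaced it), the barriers guarding its volatile fields do no useful work. A rough standalone analogy in C++ (an analogy only, not the HotSpot mechanism):

    #include <atomic>

    // If the atomic object is provably confined to one thread, the ordering
    // implied by seq_cst buys nothing and the relaxed form is observably
    // equivalent, the moral counterpart of dropping a MemBarAcquire or
    // MemBarVolatile for a scalar-replaced object.
    int confined_counter() {
      std::atomic<int> c{0};                      // never escapes this frame
      c.fetch_add(1, std::memory_order_seq_cst);  // fences wasted on a local
      c.fetch_add(1, std::memory_order_relaxed);  // same outcome, no fences
      return c.load(std::memory_order_relaxed);
    }

    int main() { return confined_counter() == 2 ? 0 : 1; }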


src/hotspot/share/opto/memnode.cpp (old version)

   1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "memory/allocation.inline.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #include "oops/objArrayKlass.hpp"
  33 #include "opto/addnode.hpp"
  34 #include "opto/arraycopynode.hpp"
  35 #include "opto/cfgnode.hpp"
  36 #include "opto/compile.hpp"
  37 #include "opto/connode.hpp"
  38 #include "opto/convertnode.hpp"
  39 #include "opto/loopnode.hpp"
  40 #include "opto/machnode.hpp"
  41 #include "opto/matcher.hpp"
  42 #include "opto/memnode.hpp"
  43 #include "opto/mulnode.hpp"
  44 #include "opto/narrowptrnode.hpp"
  45 #include "opto/phaseX.hpp"
  46 #include "opto/regmask.hpp"
  47 #include "utilities/align.hpp"
  48 #include "utilities/copy.hpp"
  49 #include "utilities/macros.hpp"
  50 #include "utilities/vmError.hpp"
  51 #if INCLUDE_ZGC
  52 #include "gc/z/c2/zBarrierSetC2.hpp"
  53 #endif
  54 
  55 // Portions of code courtesy of Clifford Click
  56 
  57 // Optimization - Graph Style
  58 
  59 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st);
  60 
  61 //=============================================================================
  62 uint MemNode::size_of() const { return sizeof(*this); }
  63 
  64 const TypePtr *MemNode::adr_type() const {
  65   Node* adr = in(Address);
  66   if (adr == NULL)  return NULL; // node is dead


 311       phase->is_IterGVN()->_worklist.push(this);
 312       return NodeSentinel; // caller will return NULL
 313     }
 314   }
 315   // Ignore if memory is dead, or self-loop
 316   Node *mem = in(MemNode::Memory);
 317   if (phase->type( mem ) == Type::TOP) return NodeSentinel; // caller will return NULL
 318   assert(mem != this, "dead loop in MemNode::Ideal");
 319 
 320   if (can_reshape && igvn != NULL && igvn->_worklist.member(mem)) {
 321     // This memory slice may be dead.
 322     // Delay this mem node transformation until the memory is processed.
 323     phase->is_IterGVN()->_worklist.push(this);
 324     return NodeSentinel; // caller will return NULL
 325   }
 326 
 327   Node *address = in(MemNode::Address);
 328   const Type *t_adr = phase->type(address);
 329   if (t_adr == Type::TOP)              return NodeSentinel; // caller will return NULL
 330 
 331   if (can_reshape && igvn != NULL &&
 332       (igvn->_worklist.member(address) ||
 333        (igvn->_worklist.size() > 0 && t_adr != adr_type())) ) {
 334     // The address's base and type may change when the address is processed.
 335     // Delay this mem node transformation until the address is processed.
 336     phase->is_IterGVN()->_worklist.push(this);
 337     return NodeSentinel; // caller will return NULL
 338   }
 339 
 340   // Do NOT remove or optimize the next lines: ensure a new alias index
 341   // is allocated for an oop pointer type before Escape Analysis.
 342   // Note: C++ will not remove it since the call has side effect.
 343   if (t_adr->isa_oopptr()) {
 344     int alias_idx = phase->C->get_alias_index(t_adr->is_ptr());
 345   }
 346 
 347   Node* base = NULL;
 348   if (address->is_AddP()) {
 349     base = address->in(AddPNode::Base);
 350   }


 891   return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
 892 }
 893 
 894 static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
 895   if ((atp != NULL) && (atp->index() >= Compile::AliasIdxRaw)) {
 896     bool non_volatile = (atp->field() != NULL) && !atp->field()->is_volatile();
 897     bool is_stable_ary = FoldStableValues &&
 898                          (tp != NULL) && (tp->isa_aryptr() != NULL) &&
 899                          tp->isa_aryptr()->is_stable();
 900 
 901     return (eliminate_boxing && non_volatile) || is_stable_ary;
 902   }
 903 
 904   return false;
 905 }
 906 
 907 // Is the value loaded previously stored by an arraycopy? If so return
 908 // a load node that reads from the source array so we may be able to
 909 // optimize out the ArrayCopy node later.
 910 Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const {
 911 #if INCLUDE_ZGC
 912   if (UseZGC) {
 913     if (bottom_type()->make_oopptr() != NULL) {
 914       return NULL;
 915     }
 916   }
 917 #endif
 918 
 919   Node* ld_adr = in(MemNode::Address);
 920   intptr_t ld_off = 0;
 921   AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
 922   Node* ac = find_previous_arraycopy(phase, ld_alloc, st, true);
 923   if (ac != NULL) {
 924     assert(ac->is_ArrayCopy(), "what kind of node can this be?");
 925 
 926     Node* mem = ac->in(TypeFunc::Memory);
 927     Node* ctl = ac->in(0);
 928     Node* src = ac->in(ArrayCopyNode::Src);
 929 
 930     if (!ac->as_ArrayCopy()->is_clonebasic() && !phase->type(src)->isa_aryptr()) {
 931       return NULL;
 932     }
 933 
 934     LoadNode* ld = clone()->as_Load();
 935     Node* addp = in(MemNode::Address)->clone();
 936     if (ac->as_ArrayCopy()->is_clonebasic()) {
 937       assert(ld_alloc != NULL, "need an alloc");
 938       assert(addp->is_AddP(), "address must be addp");


2794   // If extra input is TOP ==> the result is TOP
2795   t = phase->type( in(MemNode::OopStore) );
2796   if( t == Type::TOP ) return Type::TOP;
2797 
2798   return StoreNode::Value( phase );
2799 }
2800 
2801 
2802 //=============================================================================
2803 //----------------------------------SCMemProjNode------------------------------
2804 const Type* SCMemProjNode::Value(PhaseGVN* phase) const
2805 {
2806   return bottom_type();
2807 }
2808 
2809 //=============================================================================
2810 //----------------------------------LoadStoreNode------------------------------
2811 LoadStoreNode::LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required )
2812   : Node(required),
2813     _type(rt),
2814     _adr_type(at)
2815 {
2816   init_req(MemNode::Control, c  );
2817   init_req(MemNode::Memory , mem);
2818   init_req(MemNode::Address, adr);
2819   init_req(MemNode::ValueIn, val);
2820   init_class_id(Class_LoadStore);
2821 }
2822 
2823 uint LoadStoreNode::ideal_reg() const {
2824   return _type->ideal_reg();
2825 }
2826 
2827 bool LoadStoreNode::result_not_used() const {
2828   for( DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++ ) {
2829     Node *x = fast_out(i);
2830     if (x->Opcode() == Op_SCMemProj) continue;
2831     return false;
2832   }
2833   return true;
2834 }


3087   if (trailing_store() || trailing_load_store()) {
3088     MemBarNode* leading = leading_membar();
3089     if (leading != NULL) {
3090       assert(leading->trailing_membar() == this, "inconsistent leading/trailing membars");
3091       leading->remove(igvn);
3092     }
3093   }
3094   igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
3095   igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
3096 }
3097 
3098 //------------------------------Ideal------------------------------------------
3099 // Return a node which is more "ideal" than the current node.  Strip out
3100 // control copies
3101 Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3102   if (remove_dead_region(phase, can_reshape)) return this;
3103   // Don't bother trying to transform a dead node
3104   if (in(0) && in(0)->is_top()) {
3105     return NULL;
3106   }
3107 
3108 #if INCLUDE_ZGC
3109   if (UseZGC) {
3110     if (req() == (Precedent+1) && in(MemBarNode::Precedent)->in(0) != NULL && in(MemBarNode::Precedent)->in(0)->is_LoadBarrier()) {
3111       Node* load_node = in(MemBarNode::Precedent)->in(0)->in(LoadBarrierNode::Oop);
3112       set_req(MemBarNode::Precedent, load_node);
3113       return this;
3114     }
3115   }
3116 #endif
3117 
3118   bool progress = false;
3119   // Eliminate volatile MemBars for scalar replaced objects.
3120   if (can_reshape && req() == (Precedent+1)) {
3121     bool eliminate = false;
3122     int opc = Opcode();
3123     if ((opc == Op_MemBarAcquire || opc == Op_MemBarVolatile)) {
3124       // Volatile field loads and stores.
3125       Node* my_mem = in(MemBarNode::Precedent);
 3126       // The MemBarAcquire may keep an unused LoadNode alive through the Precedent edge
3127       if ((my_mem != NULL) && (opc == Op_MemBarAcquire) && (my_mem->outcnt() == 1)) {
3128         // if the Precedent is a decodeN and its input (a Load) is used at more than one place,
3129         // replace this Precedent (decodeN) with the Load instead.
3130         if ((my_mem->Opcode() == Op_DecodeN) && (my_mem->in(1)->outcnt() > 1))  {
3131           Node* load_node = my_mem->in(1);
3132           set_req(MemBarNode::Precedent, load_node);
3133           phase->is_IterGVN()->_worklist.push(my_mem);
3134           my_mem = load_node;
3135         } else {
3136           assert(my_mem->unique_out() == this, "sanity");
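
Both special cases in this old version of the hunk, the ZGC LoadBarrier block and the DecodeN block, do the same thing: the membar only needs its Precedent edge for ordering against the underlying load, so the edge is retargeted from the intermediate node down to the load itself, letting the intermediate node die if it has no other users. A minimal sketch of such edge retargeting, with invented types (not HotSpot code):

    #include <cstdio>

    struct N {
      const char* name;
      N* input;  // single def-use edge to the defining node
    };

    int main() {
      N load   = {"Load",          nullptr};
      N decode = {"DecodeN",       &load};   // wraps the load
      N membar = {"MemBarAcquire", &decode}; // precedent edge to the wrapper

      // Retarget the precedent edge from the wrapper to the load it wraps;
      // in the real IGVN the wrapper is then re-queued and removed if unused.
      membar.input = membar.input->input;

      std::printf("precedent of %s is now %s\n", membar.name, membar.input->name);
      return 0;
    }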

