src/share/vm/opto/memnode.cpp

--- old/src/share/vm/opto/memnode.cpp

  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "oops/objArrayKlass.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/cfgnode.hpp"
  32 #include "opto/compile.hpp"
  33 #include "opto/connode.hpp"
  34 #include "opto/loopnode.hpp"
  35 #include "opto/machnode.hpp"
  36 #include "opto/matcher.hpp"
  37 #include "opto/memnode.hpp"
  38 #include "opto/mulnode.hpp"
  39 #include "opto/phaseX.hpp"
  40 #include "opto/regmask.hpp"
  41 
  42 // Portions of code courtesy of Clifford Click
  43 
  44 // Optimization - Graph Style
  45 
  46 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st);
  47 
  48 //=============================================================================
  49 uint MemNode::size_of() const { return sizeof(*this); }
  50 
  51 const TypePtr *MemNode::adr_type() const {
  52   Node* adr = in(Address);
  53   const TypePtr* cross_check = NULL;
  54   DEBUG_ONLY(cross_check = _adr_type);
  55   return calculate_adr_type(adr->bottom_type(), cross_check);
  56 }
  57 
  58 bool MemNode::check_if_adr_maybe_raw(Node* adr) {
  59   if (adr != NULL) {
  60     if (adr->bottom_type()->base() == Type::RawPtr || adr->bottom_type()->base() == Type::AnyPtr) {


 655     //            TypeRawPtr::BOTTOM.  Needs to be investigated.
 656     if (cross_check != NULL &&
 657         cross_check != TypePtr::BOTTOM &&
 658         cross_check != TypeRawPtr::BOTTOM) {
 659       // Recheck the alias index, to see if it has changed (due to a bug).
 660       Compile* C = Compile::current();
 661       assert(C->get_alias_index(cross_check) == C->get_alias_index(tp),
 662              "must stay in the original alias category");
 663       // The type of the address must be contained in the adr_type,
 664       // disregarding "null"-ness.
 665       // (We make an exception for TypeRawPtr::BOTTOM, which is a bit bucket.)
 666       const TypePtr* tp_notnull = tp->join(TypePtr::NOTNULL)->is_ptr();
 667       assert(cross_check->meet(tp_notnull) == cross_check->remove_speculative(),
 668              "real address must not escape from expected memory type");
 669     }
 670     #endif
 671     return tp;
 672   }
 673 }
 674 
 675 //------------------------adr_phi_is_loop_invariant----------------------------
 676 // A helper function for Ideal_DU_postCCP to check if a Phi in a counted
 677 // loop is loop invariant. Make a quick traversal of Phi and associated
 678 // CastPP nodes, looking to see if they are a closed group within the loop.
 679 bool MemNode::adr_phi_is_loop_invariant(Node* adr_phi, Node* cast) {
 680   // The idea is that the phi-nest must boil down to only CastPP nodes
 681   // with the same data. This implies that any path into the loop already
 682   // includes such a CastPP, and so the original cast, whatever its input,
 683   // must be covered by an equivalent cast, with an earlier control input.
 684   ResourceMark rm;
 685 
 686   // The loop entry input of the phi should be the unique dominating
 687   // node for every Phi/CastPP in the loop.
 688   Unique_Node_List closure;
 689   closure.push(adr_phi->in(LoopNode::EntryControl));
 690 
 691   // Add the phi node and the cast to the worklist.
 692   Unique_Node_List worklist;
 693   worklist.push(adr_phi);
 694   if( cast != NULL ){
 695     if( !cast->is_ConstraintCast() ) return false;
 696     worklist.push(cast);
 697   }
 698 
 699   // Begin recursive walk of phi nodes.
 700   while( worklist.size() ){
 701     // Take a node off the worklist
 702     Node *n = worklist.pop();
 703     if( !closure.member(n) ){
 704       // Add it to the closure.
 705       closure.push(n);
 706       // Make a sanity check to ensure we don't waste too much time here.
 707       if( closure.size() > 20) return false;
 708       // This node is OK if:
 709       //  - it is a cast of an identical value
 710       //  - or it is a phi node (then we add its inputs to the worklist)
 711       // Otherwise, the node is not OK, and we presume the cast is not invariant
 712       if( n->is_ConstraintCast() ){
 713         worklist.push(n->in(1));
 714       } else if( n->is_Phi() ) {
 715         for( uint i = 1; i < n->req(); i++ ) {
 716           worklist.push(n->in(i));
 717         }
 718       } else {
 719         return false;
 720       }
 721     }
 722   }
 723 
 724   // Quit when the worklist is empty, and we've found no offending nodes.
 725   return true;
 726 }
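// Review note: a minimal sketch (hypothetical node names) of a phi-nest
// that passes this check:
//
//     entry ----------> CastPP#1 (of value p)
//                           \
//     loop-head Phi: in(1) = CastPP#1, in(2) = CastPP#2
//                           /
//     backedge --------> CastPP#2 (of value p)
//
// The closure {Phi, CastPP#1, CastPP#2} bottoms out only in casts of the
// same value p, so every path into the loop already carries an equivalent
// cast with an earlier control input, and the address phi is loop invariant.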
 727 
 728 //------------------------------Ideal_DU_postCCP-------------------------------
 729 // Find any cast-away of null-ness and keep its control.  Null cast-aways are
 730 // going away in this pass and we need to make this memory op depend on the
 731 // gating null check.
 732 Node *MemNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
 733   return Ideal_common_DU_postCCP(ccp, this, in(MemNode::Address));
 734 }
 735 
 736 // I tried to leave the CastPP's in.  This makes the graph more accurate in
 737 // some sense; we get to keep around the knowledge that an oop is not-null
 738 // after some test.  Alas, the CastPP's interfere with GVN (some values are
 739 // the regular oop, some are the CastPP of the oop, all merge at Phi's which
 740 // cannot collapse, etc).  This cost us 10% on SpecJVM, even when I removed
 741 // some of the more trivial cases in the optimizer.  Removing more useless
 742 // Phi's started allowing Loads to illegally float above null checks.  I gave
 743 // up on this approach.  CNC 10/20/2000
 744 // This static method may be called not from MemNode (EncodePNode calls it).
 745 // Only the control edge of the node 'n' might be updated.
 746 Node *MemNode::Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr ) {
 747   Node *skipped_cast = NULL;
 748   // Need a null check?  Regular static accesses do not because they are
 749   // from constant addresses.  Array ops are gated by the range check (which
 750   // always includes a NULL check).  Just check field ops.
 751   if( n->in(MemNode::Control) == NULL ) {
 752     // Scan upwards for the highest location we can place this memory op.
 753     while( true ) {
 754       switch( adr->Opcode() ) {
 755 
 756       case Op_AddP:             // No change to NULL-ness, so peek thru AddP's
 757         adr = adr->in(AddPNode::Base);
 758         continue;
 759 
 760       case Op_DecodeN:         // No change to NULL-ness, so peek thru
 761       case Op_DecodeNKlass:
 762         adr = adr->in(1);
 763         continue;
 764 
 765       case Op_EncodeP:
 766       case Op_EncodePKlass:
 767         // EncodeP node's control edge could be set by this method
 768         // when EncodeP node depends on CastPP node.
 769         //
 770         // Use its control edge for memory op because EncodeP may go away
 771         // later when it is folded with following or preceding DecodeN node.
 772         if (adr->in(0) == NULL) {
 773           // Keep looking for cast nodes.
 774           adr = adr->in(1);
 775           continue;
 776         }
 777         ccp->hash_delete(n);
 778         n->set_req(MemNode::Control, adr->in(0));
 779         ccp->hash_insert(n);
 780         return n;
 781 
 782       case Op_CastPP:
 783         // If the CastPP is useless, just peek on through it.
 784         if( ccp->type(adr) == ccp->type(adr->in(1)) ) {
 785         // Remember the cast that we've peeked through. If we peek
 786           // through more than one, then we end up remembering the highest
 787           // one, that is, if in a loop, the one closest to the top.
 788           skipped_cast = adr;
 789           adr = adr->in(1);
 790           continue;
 791         }
 792         // CastPP is going away in this pass!  We need this memory op to be
 793         // control-dependent on the test that is guarding the CastPP.
 794         ccp->hash_delete(n);
 795         n->set_req(MemNode::Control, adr->in(0));
 796         ccp->hash_insert(n);
 797         return n;
 798 
 799       case Op_Phi:
 800         // Attempt to float above a Phi to some dominating point.
 801         if (adr->in(0) != NULL && adr->in(0)->is_CountedLoop()) {
 802           // If we've already peeked through a Cast (which could have set the
 803           // control), we can't float above a Phi, because the skipped Cast
 804           // may not be loop invariant.
 805           if (adr_phi_is_loop_invariant(adr, skipped_cast)) {
 806             adr = adr->in(1);
 807             continue;
 808           }
 809         }
 810 
 811         // Intentional fallthrough!
 812 
 813         // No obvious dominating point.  The mem op is pinned below the Phi
 814         // by the Phi itself.  If the Phi goes away (no true value is merged)
 815         // then the mem op can float, but not indefinitely.  It must be pinned
 816         // behind the controls leading to the Phi.
 817       case Op_CheckCastPP:
 818         // These usually stick around to change address type, however a
 819         // useless one can be elided and we still need to pick up a control edge
 820         if (adr->in(0) == NULL) {
 821           // This CheckCastPP node has NO control and is likely useless. But we
 822         // need to check further up the ancestor chain for a control input to keep
 823           // the node in place. 4959717.
 824           skipped_cast = adr;
 825           adr = adr->in(1);
 826           continue;
 827         }
 828         ccp->hash_delete(n);
 829         n->set_req(MemNode::Control, adr->in(0));
 830         ccp->hash_insert(n);
 831         return n;
 832 
 833         // List of "safe" opcodes; those that implicitly block the memory
 834         // op below any null check.
 835       case Op_CastX2P:          // no null checks on native pointers
 836       case Op_Parm:             // 'this' pointer is not null
 837       case Op_LoadP:            // Loading from within a klass
 838       case Op_LoadN:            // Loading from within a klass
 839       case Op_LoadKlass:        // Loading from within a klass
 840       case Op_LoadNKlass:       // Loading from within a klass
 841       case Op_ConP:             // Loading from a klass
 842       case Op_ConN:             // Loading from a klass
 843       case Op_ConNKlass:        // Loading from a klass
 844       case Op_CreateEx:         // Sucking up the guts of an exception oop
 845       case Op_Con:              // Reading from TLS
 846       case Op_CMoveP:           // CMoveP is pinned
 847       case Op_CMoveN:           // CMoveN is pinned
 848         break;                  // No progress
 849 
 850       case Op_Proj:             // Direct call to an allocation routine
 851       case Op_SCMemProj:        // Memory state from store conditional ops
 852 #ifdef ASSERT
 853         {
 854           assert(adr->as_Proj()->_con == TypeFunc::Parms, "must be return value");
 855           const Node* call = adr->in(0);
 856           if (call->is_CallJava()) {
 857             const CallJavaNode* call_java = call->as_CallJava();
 858             const TypeTuple *r = call_java->tf()->range();
 859             assert(r->cnt() > TypeFunc::Parms, "must return value");
 860             const Type* ret_type = r->field_at(TypeFunc::Parms);
 861             assert(ret_type && ret_type->isa_ptr(), "must return pointer");
 862             // We further presume that this is one of
 863             // new_instance_Java, new_array_Java, or
 864             // the like, but do not assert for this.
 865           } else if (call->is_Allocate()) {
 866             // similar case to new_instance_Java, etc.
 867           } else if (!call->is_CallLeaf()) {
 868             // Projections from fetch_oop (OSR) are allowed as well.
 869             ShouldNotReachHere();
 870           }
 871         }
 872 #endif
 873         break;
 874       default:
 875         ShouldNotReachHere();
 876       }
 877       break;
 878     }
 879   }
 880 
 881   return  NULL;               // No progress
 882 }
 883 
 884 
 885 //=============================================================================
 886 // Should LoadNode::Ideal() attempt to remove control edges?
 887 bool LoadNode::can_remove_control() const {
 888   return true;
 889 }
 890 uint LoadNode::size_of() const { return sizeof(*this); }
 891 uint LoadNode::cmp( const Node &n ) const
 892 { return !Type::cmp( _type, ((LoadNode&)n)._type ); }
 893 const Type *LoadNode::bottom_type() const { return _type; }
 894 uint LoadNode::ideal_reg() const {
 895   return _type->ideal_reg();
 896 }
 897 
 898 #ifndef PRODUCT
 899 void LoadNode::dump_spec(outputStream *st) const {
 900   MemNode::dump_spec(st);
 901   if( !Verbose && !WizardMode ) {
 902     // standard dump does this in Verbose and WizardMode
 903     st->print(" #"); _type->dump_on(st);
 904   }


1089       return phase->zerocon(memory_type());
1090     }
1091 
1092     // A load from an initialization barrier can match a captured store.
1093     if (st->is_Proj() && st->in(0)->is_Initialize()) {
1094       InitializeNode* init = st->in(0)->as_Initialize();
1095       AllocateNode* alloc = init->allocation();
1096       if ((alloc != NULL) && (alloc == ld_alloc)) {
1097         // examine a captured store value
1098         st = init->find_captured_store(ld_off, memory_size(), phase);
1099         if (st != NULL)
1100           continue;             // take one more trip around
1101       }
1102     }
1103 
1104     // A load of a boxed value from the result of a valueOf() call yields the call's input parameter (e.g. Integer.valueOf(x).value == x).
1105     if (this->is_Load() && ld_adr->is_AddP() &&
1106         (tp != NULL) && tp->is_ptr_to_boxed_value()) {
1107       intptr_t ignore = 0;
1108       Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore);
1109       if (base != NULL && base->is_Proj() &&
1110           base->as_Proj()->_con == TypeFunc::Parms &&
1111           base->in(0)->is_CallStaticJava() &&
1112           base->in(0)->as_CallStaticJava()->is_boxing_method()) {
1113         return base->in(0)->in(TypeFunc::Parms);
1114       }
1115     }
1116 
1117     break;
1118   }
1119 
1120   return NULL;
1121 }
1122 
1123 //----------------------is_instance_field_load_with_local_phi------------------
1124 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
1125   if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
1126       in(Address)->is_AddP() ) {
1127     const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
1128     // Only instances and boxed values.


1138 }
1139 
1140 //------------------------------Identity---------------------------------------
1141 // Loads are identity if previous store is to same address
1142 Node *LoadNode::Identity( PhaseTransform *phase ) {
1143   // If the previous store-maker is the right kind of Store, and the store is
1144   // to the same address, then we are equal to the value stored.
1145   Node* mem = in(Memory);
1146   Node* value = can_see_stored_value(mem, phase);
1147   if( value ) {
1148     // byte, short & char stores truncate naturally.
1149     // A load has to load the truncated value which requires
1150     // some sort of masking operation and that requires an
1151     // Ideal call instead of an Identity call.
1152     if (memory_size() < BytesPerInt) {
1153       // If the input to the store does not fit with the load's result type,
1154       // it must be truncated via an Ideal call.
1155       if (!phase->type(value)->higher_equal(phase->type(this)))
1156         return this;
1157     }
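    // Review note: for example, a StoreB of the int constant 0x1FF seen by
    // a LoadB must yield the truncated value -1, not 0x1FF; Identity cannot
    // insert that masking, so this case is left for an Ideal transformation.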
1158     // (This works even when value is a Con, but LoadNode::Value
1159     // usually runs first, producing the singleton type of the Con.)
1160     return value;
1161   }
1162 
1163   // Search for an existing data phi which was generated before for the same
1164   // instance's field to avoid infinite generation of phis in a loop.
1165   Node *region = mem->in(0);
1166   if (is_instance_field_load_with_local_phi(region)) {
1167     const TypeOopPtr *addr_t = in(Address)->bottom_type()->isa_oopptr();
1168     int this_index  = phase->C->get_alias_index(addr_t);
1169     int this_offset = addr_t->offset();
1170     int this_iid    = addr_t->instance_id();
1171     if (!addr_t->is_known_instance() &&
1172          addr_t->is_ptr_to_boxed_value()) {
1173       // Use _idx of address base (could be Phi node) for boxed values.
1174       intptr_t   ignore = 0;
1175       Node*      base = AddPNode::Ideal_base_and_offset(in(Address), phase, ignore);
1176       if (base == NULL) {
1177         return this;
1178       }
1179       this_iid = base->_idx;


1678   if (tp == NULL || tp->empty())  return Type::TOP;
1679   int off = tp->offset();
1680   assert(off != Type::OffsetTop, "case covered by TypePtr::empty");
1681   Compile* C = phase->C;
1682 
1683   // Try to guess loaded type from pointer type
1684   if (tp->isa_aryptr()) {
1685     const TypeAryPtr* ary = tp->is_aryptr();
1686     const Type* t = ary->elem();
1687 
1688     // Determine whether the reference is beyond the header or not, by comparing
1689     // the offset against the offset of the start of the array's data.
1690     // Different array types begin at slightly different offsets (12 vs. 16).
1691     // We choose T_BYTE as an example base type that is least restrictive
1692     // as to alignment, which will therefore produce the smallest
1693     // possible base offset.
1694     const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1695     const bool off_beyond_header = ((uint)off >= (uint)min_base_off);
1696 
1697     // Try to constant-fold a stable array element.
1698     if (FoldStableValues && ary->is_stable() && ary->const_oop() != NULL) {
1699       // Make sure the reference is not into the header and the offset is constant
1700       if (off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) {
1701         const Type* con_type = fold_stable_ary_elem(ary, off, memory_type());
1702         if (con_type != NULL) {
1703           return con_type;
1704         }
1705       }
1706     }
1707 
1708     // Don't do this for integer types. There is only potential profit if
1709     // the element type t is lower than _type; that is, for int types, if _type is
1710     // more restrictive than t.  This only happens here if one is short and the other
1711     // char (both 16 bits), and in those cases we've made an intentional decision
1712     // to use one kind of load over the other. See AndINode::Ideal and 4965907.
1713     // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
1714     //
1715     // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
1716     // where the _gvn.type of the AddP is wider than 8.  This occurs when an earlier
1717     // copy p0 of (AddP x x 8) has been proven equal to p1, and the p0 has been
1718     // subsumed by p1.  If p1 is on the worklist but has not yet been re-transformed,
1719     // it is possible that p1 will have a type like Foo*[int+]:NotNull*+any.
1720     // In fact, that could have been the original type of p1, and p1 could have

+++ new/src/share/vm/opto/memnode.cpp

  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "oops/objArrayKlass.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/cfgnode.hpp"
  32 #include "opto/compile.hpp"
  33 #include "opto/connode.hpp"
  34 #include "opto/loopnode.hpp"
  35 #include "opto/machnode.hpp"
  36 #include "opto/matcher.hpp"
  37 #include "opto/memnode.hpp"
  38 #include "opto/mulnode.hpp"
  39 #include "opto/phaseX.hpp"
  40 #include "opto/regmask.hpp"
  41 #if INCLUDE_ALL_GCS
  42 #include "gc_implementation/shenandoah/shenandoahForwarding.hpp"
  43 #include "gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  44 #include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp"
  45 #endif
  46 
  47 // Portions of code courtesy of Clifford Click
  48 
  49 // Optimization - Graph Style
  50 
  51 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st);
  52 
  53 //=============================================================================
  54 uint MemNode::size_of() const { return sizeof(*this); }
  55 
  56 const TypePtr *MemNode::adr_type() const {
  57   Node* adr = in(Address);
  58   const TypePtr* cross_check = NULL;
  59   DEBUG_ONLY(cross_check = _adr_type);
  60   return calculate_adr_type(adr->bottom_type(), cross_check);
  61 }
  62 
  63 bool MemNode::check_if_adr_maybe_raw(Node* adr) {
  64   if (adr != NULL) {
  65     if (adr->bottom_type()->base() == Type::RawPtr || adr->bottom_type()->base() == Type::AnyPtr) {


 660     //            TypeRawPtr::BOTTOM.  Needs to be investigated.
 661     if (cross_check != NULL &&
 662         cross_check != TypePtr::BOTTOM &&
 663         cross_check != TypeRawPtr::BOTTOM) {
 664       // Recheck the alias index, to see if it has changed (due to a bug).
 665       Compile* C = Compile::current();
 666       assert(C->get_alias_index(cross_check) == C->get_alias_index(tp),
 667              "must stay in the original alias category");
 668       // The type of the address must be contained in the adr_type,
 669       // disregarding "null"-ness.
 670       // (We make an exception for TypeRawPtr::BOTTOM, which is a bit bucket.)
 671       const TypePtr* tp_notnull = tp->join(TypePtr::NOTNULL)->is_ptr();
 672       assert(cross_check->meet(tp_notnull) == cross_check->remove_speculative(),
 673              "real address must not escape from expected memory type");
 674     }
 675     #endif
 676     return tp;
 677   }
 678 }
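// Review note: the debug-only check above asserts, for instance, that a
// load created for a hypothetical field Foo.f keeps the alias index of
// Foo.f's address type even after IGVN sharpens the address it sees.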
 679 
 680 //=============================================================================
 681 // Should LoadNode::Ideal() attempt to remove control edges?
 682 bool LoadNode::can_remove_control() const {
 683   return true;
 684 }
 685 uint LoadNode::size_of() const { return sizeof(*this); }
 686 uint LoadNode::cmp( const Node &n ) const
 687 { return !Type::cmp( _type, ((LoadNode&)n)._type ); }
 688 const Type *LoadNode::bottom_type() const { return _type; }
 689 uint LoadNode::ideal_reg() const {
 690   return _type->ideal_reg();
 691 }
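// Review note: cmp() above folds _type into a load's GVN identity.
// Node::cmp() returns nonzero when two nodes are equal, while Type::cmp()
// returns 0 for equal types, hence the negation: two loads value-number
// together only when their declared types match.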
 692 
 693 #ifndef PRODUCT
 694 void LoadNode::dump_spec(outputStream *st) const {
 695   MemNode::dump_spec(st);
 696   if( !Verbose && !WizardMode ) {
 697     // standard dump does this in Verbose and WizardMode
 698     st->print(" #"); _type->dump_on(st);
 699   }


 884       return phase->zerocon(memory_type());
 885     }
 886 
 887     // A load from an initialization barrier can match a captured store.
 888     if (st->is_Proj() && st->in(0)->is_Initialize()) {
 889       InitializeNode* init = st->in(0)->as_Initialize();
 890       AllocateNode* alloc = init->allocation();
 891       if ((alloc != NULL) && (alloc == ld_alloc)) {
 892         // examine a captured store value
 893         st = init->find_captured_store(ld_off, memory_size(), phase);
 894         if (st != NULL)
 895           continue;             // take one more trip around
 896       }
 897     }
 898 
 899     // A load of a boxed value from the result of a valueOf() call yields the call's input parameter (e.g. Integer.valueOf(x).value == x).
 900     if (this->is_Load() && ld_adr->is_AddP() &&
 901         (tp != NULL) && tp->is_ptr_to_boxed_value()) {
 902       intptr_t ignore = 0;
 903       Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore);
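      // Review note: under Shenandoah the base may be hidden behind a
      // load-reference barrier; it is stepped over below so the valueOf()
      // projection pattern can still be recognized.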
 904 #if INCLUDE_ALL_GCS
 905       if (UseShenandoahGC) {
 906         base = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(base);
 907       }
 908 #endif
 909       if (base != NULL && base->is_Proj() &&
 910           base->as_Proj()->_con == TypeFunc::Parms &&
 911           base->in(0)->is_CallStaticJava() &&
 912           base->in(0)->as_CallStaticJava()->is_boxing_method()) {
 913         return base->in(0)->in(TypeFunc::Parms);
 914       }
 915     }
 916 
 917     break;
 918   }
 919 
 920   return NULL;
 921 }
 922 
 923 //----------------------is_instance_field_load_with_local_phi------------------
 924 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
 925   if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
 926       in(Address)->is_AddP() ) {
 927     const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
 928     // Only instances and boxed values.


 938 }
 939 
 940 //------------------------------Identity---------------------------------------
 941 // Loads are identity if previous store is to same address
 942 Node *LoadNode::Identity( PhaseTransform *phase ) {
 943   // If the previous store-maker is the right kind of Store, and the store is
 944   // to the same address, then we are equal to the value stored.
 945   Node* mem = in(Memory);
 946   Node* value = can_see_stored_value(mem, phase);
 947   if( value ) {
 948     // byte, short & char stores truncate naturally.
 949     // A load has to load the truncated value which requires
 950     // some sort of masking operation and that requires an
 951     // Ideal call instead of an Identity call.
 952     if (memory_size() < BytesPerInt) {
 953       // If the input to the store does not fit with the load's result type,
 954       // it must be truncated via an Ideal call.
 955       if (!phase->type(value)->higher_equal(phase->type(this)))
 956         return this;
 957     }
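    // Review note: the Shenandoah-specific block below appears to delay
    // this Identity while 'value' is a Phi merging LoadReferenceBarrier
    // results whose phi, region, or guarding IfProj is still queued for
    // IGVN; the load re-queues itself and retries once the shape settles.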
 958     PhaseIterGVN* igvn = phase->is_IterGVN();
 959     if (UseShenandoahGC &&
 960         igvn != NULL &&
 961         value->is_Phi() &&
 962         value->req() > 2 &&
 963         value->in(1) != NULL &&
 964         value->in(1)->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 965       if (igvn->_worklist.member(value) ||
 966           igvn->_worklist.member(value->in(0)) ||
 967           (value->in(0)->in(1) != NULL &&
 968            value->in(0)->in(1)->is_IfProj() &&
 969            (igvn->_worklist.member(value->in(0)->in(1)) ||
 970             (value->in(0)->in(1)->in(0) != NULL &&
 971              igvn->_worklist.member(value->in(0)->in(1)->in(0)))))) {
 972         igvn->_worklist.push(this);
 973         return this;
 974       }
 975     }
 976     // (This works even when value is a Con, but LoadNode::Value
 977     // usually runs first, producing the singleton type of the Con.)
 978     // TODO!!
 979     if (false && UseShenandoahGC) {
 980       Node* value_no_barrier = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(value->Opcode() == Op_EncodeP ? value->in(1) : value);
 981       if (value->Opcode() == Op_EncodeP) {
 982         if (value_no_barrier != value->in(1)) {
 983           Node* encode = value->clone();
 984           encode->set_req(1, value_no_barrier);
 985           encode = phase->transform(encode);
 986           return encode;
 987         }
 988       } else {
 989         return value_no_barrier;
 990       }
 991     }
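    // Review note: the 'if (false && UseShenandoahGC)' block above is
    // deliberately dead code; stripping the GC barrier from the returned
    // value is left as the TODO noted above.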
 992 
 993     return value;
 994   }
 995 
 996   // Search for an existing data phi which was generated before for the same
 997   // instance's field to avoid infinite generation of phis in a loop.
 998   Node *region = mem->in(0);
 999   if (is_instance_field_load_with_local_phi(region)) {
1000     const TypeOopPtr *addr_t = in(Address)->bottom_type()->isa_oopptr();
1001     int this_index  = phase->C->get_alias_index(addr_t);
1002     int this_offset = addr_t->offset();
1003     int this_iid    = addr_t->instance_id();
1004     if (!addr_t->is_known_instance() &&
1005          addr_t->is_ptr_to_boxed_value()) {
1006       // Use _idx of address base (could be Phi node) for boxed values.
1007       intptr_t   ignore = 0;
1008       Node*      base = AddPNode::Ideal_base_and_offset(in(Address), phase, ignore);
1009       if (base == NULL) {
1010         return this;
1011       }
1012       this_iid = base->_idx;


1511   if (tp == NULL || tp->empty())  return Type::TOP;
1512   int off = tp->offset();
1513   assert(off != Type::OffsetTop, "case covered by TypePtr::empty");
1514   Compile* C = phase->C;
1515 
1516   // Try to guess loaded type from pointer type
1517   if (tp->isa_aryptr()) {
1518     const TypeAryPtr* ary = tp->is_aryptr();
1519     const Type* t = ary->elem();
1520 
1521     // Determine whether the reference is beyond the header or not, by comparing
1522     // the offset against the offset of the start of the array's data.
1523     // Different array types begin at slightly different offsets (12 vs. 16).
1524     // We choose T_BYTE as an example base type that is least restrictive
1525     // as to alignment, which will therefore produce the smallest
1526     // possible base offset.
1527     const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1528     const bool off_beyond_header = ((uint)off >= (uint)min_base_off);
1529 
1530     // Try to constant-fold a stable array element.
1531     if (FoldStableValues && ary->is_stable()) {
1532       // Make sure the reference is not into the header and the offset is constant
1533       ciObject* aobj = ary->const_oop();
1534       if (aobj != NULL && off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) {
1535         const Type* con_type = fold_stable_ary_elem(ary, off, memory_type());
1536         if (con_type != NULL) {
1537           return con_type;
1538         }
1539       }
1540     }
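    // Review note: e.g. with -XX:+FoldStableValues, a load of element 1 of
    // a constant @Stable array field like  @Stable int[] t = {1, 2, 3};
    // can fold to the constant 2 here: the base oop is constant, the offset
    // is constant and beyond the header, and the element is non-default.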
1541 
1542     // Don't do this for integer types. There is only potential profit if
1543     // the element type t is lower than _type; that is, for int types, if _type is
1544     // more restrictive than t.  This only happens here if one is short and the other
1545     // char (both 16 bits), and in those cases we've made an intentional decision
1546     // to use one kind of load over the other. See AndINode::Ideal and 4965907.
1547     // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
1548     //
1549     // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
1550     // where the _gvn.type of the AddP is wider than 8.  This occurs when an earlier
1551     // copy p0 of (AddP x x 8) has been proven equal to p1, and the p0 has been
1552     // subsumed by p1.  If p1 is on the worklist but has not yet been re-transformed,
1553     // it is possible that p1 will have a type like Foo*[int+]:NotNull*+any.
1554     // In fact, that could have been the original type of p1, and p1 could have

