< prev index next >

src/hotspot/share/opto/graphKit.cpp

Print this page




  26 #include "ci/ciUtilities.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "interpreter/interpreter.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #include "opto/addnode.hpp"
  33 #include "opto/castnode.hpp"
  34 #include "opto/convertnode.hpp"
  35 #include "opto/graphKit.hpp"
  36 #include "opto/idealKit.hpp"
  37 #include "opto/intrinsicnode.hpp"
  38 #include "opto/locknode.hpp"
  39 #include "opto/machnode.hpp"
  40 #include "opto/opaquenode.hpp"
  41 #include "opto/parse.hpp"
  42 #include "opto/rootnode.hpp"
  43 #include "opto/runtime.hpp"
  44 #include "runtime/deoptimization.hpp"
  45 #include "runtime/sharedRuntime.hpp"




  46 
//----------------------------GraphKit-----------------------------------------
// Main utility constructor.
GraphKit::GraphKit(JVMState* jvms)
  : Phase(Phase::Parser),
    _env(C->env()),
    _gvn(*C->initial_gvn()),
    _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
{
  // Take ownership of any exception states chained off the incoming map,
  // clearing the map's own list so they are not processed twice.
  _exceptions = jvms->map()->next_exception();
  if (_exceptions != NULL)  jvms->map()->set_next_exception(NULL);
  set_jvms(jvms);
}
  59 
  60 // Private constructor for parser.
  61 GraphKit::GraphKit()
  62   : Phase(Phase::Parser),
  63     _env(C->env()),
  64     _gvn(*C->initial_gvn()),
  65     _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())


 580       break;
 581     case Deoptimization::Reason_class_check:
 582       if (java_bc() == Bytecodes::_aastore) {
 583         ex_obj = env()->ArrayStoreException_instance();
 584       } else {
 585         ex_obj = env()->ClassCastException_instance();
 586       }
 587       break;
 588     default:
 589       break;
 590     }
 591     if (failing()) { stop(); return; }  // exception allocation might fail
 592     if (ex_obj != NULL) {
 593       // Cheat with a preallocated exception object.
 594       if (C->log() != NULL)
 595         C->log()->elem("hot_throw preallocated='1' reason='%s'",
 596                        Deoptimization::trap_reason_name(reason));
 597       const TypeInstPtr* ex_con  = TypeInstPtr::make(ex_obj);
 598       Node*              ex_node = _gvn.transform(ConNode::make(ex_con));
 599 


 600       // Clear the detail message of the preallocated exception object.
 601       // Weblogic sometimes mutates the detail message of exceptions
 602       // using reflection.
 603       int offset = java_lang_Throwable::get_detailMessage_offset();
 604       const TypePtr* adr_typ = ex_con->add_offset(offset);
 605 
 606       Node *adr = basic_plus_adr(ex_node, ex_node, offset);
 607       const TypeOopPtr* val_type = TypeOopPtr::make_from_klass(env()->String_klass());
 608       Node *store = access_store_at(control(), ex_node, adr, adr_typ, null(), val_type, T_OBJECT, IN_HEAP);
 609 
 610       add_exception_state(make_exception_state(ex_node));
 611       return;
 612     }
 613   }
 614 
 615   // %%% Maybe add entry to OptoRuntime which directly throws the exc.?
 616   // It won't be much cheaper than bailing to the interp., since we'll
 617   // have to pass up all the debug-info, and the runtime will have to
 618   // create the stack trace.
 619 


1664                                      int alias_idx,
1665                                      Node* new_val,
1666                                      const Type* value_type,
1667                                      BasicType bt,
1668                                      DecoratorSet decorators) {
1669   set_control(ctl);
1670   C2AccessValuePtr addr(adr, adr_type);
1671   C2AtomicAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, bt, obj, addr, alias_idx);
1672   if (access.is_raw()) {
1673     return _barrier_set->BarrierSetC2::atomic_add_at(access, new_val, value_type);
1674   } else {
1675     return _barrier_set->atomic_add_at(access, new_val, value_type);
1676   }
1677 }
1678 
1679 void GraphKit::access_clone(Node* ctl, Node* src, Node* dst, Node* size, bool is_array) {
1680   set_control(ctl);
1681   return _barrier_set->clone(this, src, dst, size, is_array);
1682 }
1683 









1684 //-------------------------array_element_address-------------------------
1685 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1686                                       const TypeInt* sizetype, Node* ctrl) {
1687   uint shift  = exact_log2(type2aelembytes(elembt));
1688   uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1689 
1690   // short-circuit a common case (saves lots of confusing waste motion)
1691   jint idx_con = find_int_con(idx, -1);
1692   if (idx_con >= 0) {
1693     intptr_t offset = header + ((intptr_t)idx_con << shift);
1694     return basic_plus_adr(ary, offset);
1695   }
1696 
1697   // must be correct type for alignment purposes
1698   Node* base  = basic_plus_adr(ary, header);
1699   idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
1700   Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
1701   return basic_plus_adr(ary, base, scale);
1702 }
1703 


3201   } else {
3202     set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)),alias_idx);
3203   }
3204   return membar;
3205 }
3206 
3207 //------------------------------shared_lock------------------------------------
3208 // Emit locking code.
3209 FastLockNode* GraphKit::shared_lock(Node* obj) {
3210   // bci is either a monitorenter bc or InvocationEntryBci
3211   // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3212   assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3213 
3214   if( !GenerateSynchronizationCode )
3215     return NULL;                // Not locking things?
3216   if (stopped())                // Dead monitor?
3217     return NULL;
3218 
3219   assert(dead_locals_are_killed(), "should kill locals before sync. point");
3220 


3221   // Box the stack location
3222   Node* box = _gvn.transform(new BoxLockNode(next_monitor()));
3223   Node* mem = reset_memory();
3224 
3225   FastLockNode * flock = _gvn.transform(new FastLockNode(0, obj, box) )->as_FastLock();
3226   if (UseBiasedLocking && PrintPreciseBiasedLockingStatistics) {
3227     // Create the counters for this fast lock.
3228     flock->create_lock_counter(sync_jvms()); // sync_jvms used to get current bci
3229   }
3230 
3231   // Create the rtm counters for this fast lock if needed.
3232   flock->create_rtm_lock_counter(sync_jvms()); // sync_jvms used to get current bci
3233 
3234   // Add monitor to debug info for the slow path.  If we block inside the
3235   // slow path and de-opt, we need the monitor hanging around
3236   map()->push_monitor( flock );
3237 
3238   const TypeFunc *tf = LockNode::lock_type();
3239   LockNode *lock = new LockNode(C, tf);
3240 


3696     Node* ccast = alloc->make_ideal_length(ary_type, &_gvn);
3697     if (ccast != length) {
3698       _gvn.set_type_bottom(ccast);
3699       record_for_igvn(ccast);
3700       replace_in_map(length, ccast);
3701     }
3702   }
3703 
3704   return javaoop;
3705 }
3706 
3707 // The following "Ideal_foo" functions are placed here because they recognize
3708 // the graph shapes created by the functions immediately above.
3709 
3710 //---------------------------Ideal_allocation----------------------------------
3711 // Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
3712 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
3713   if (ptr == NULL) {     // reduce dumb test in callers
3714     return NULL;
3715   }






3716   if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast
3717     ptr = ptr->in(1);
3718     if (ptr == NULL) return NULL;
3719   }
3720   // Return NULL for allocations with several casts:
3721   //   j.l.reflect.Array.newInstance(jobject, jint)
3722   //   Object.clone()
3723   // to keep more precise type from last cast.
3724   if (ptr->is_Proj()) {
3725     Node* allo = ptr->in(0);
3726     if (allo != NULL && allo->is_Allocate()) {
3727       return allo->as_Allocate();
3728     }
3729   }
3730   // Report failure to match.
3731   return NULL;
3732 }
3733 
3734 // Fancy version which also strips off an offset (and reports it to caller).
3735 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase,


// Flush the finished IdealKit state back into this GraphKit.
void GraphKit::final_sync(IdealKit& ideal) {
  // Final sync IdealKit and graphKit.
  sync_kit(ideal);
}
3826 
// Load the logical character length of a String: the backing array length
// shifted right by the coder value (presumably 0 for LATIN1, 1 for UTF16 --
// verify against java_lang_String's coder constants).
Node* GraphKit::load_String_length(Node* ctrl, Node* str) {
  Node* len = load_array_length(load_String_value(ctrl, str));
  Node* coder = load_String_coder(ctrl, str);
  // Divide length by 2 if coder is UTF16
  return _gvn.transform(new RShiftINode(len, coder));
}
3833 
// Load the String.value field (a non-null byte[] array) of 'str'.
Node* GraphKit::load_String_value(Node* ctrl, Node* str) {
  int value_offset = java_lang_String::value_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* value_field_type = string_type->add_offset(value_offset);
  // Result type: non-null byte[] of non-negative length.
  const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
                                                  TypeAry::make(TypeInt::BYTE, TypeInt::POS),
                                                  ciTypeArrayKlass::make(T_BYTE), true, 0);
  Node* p = basic_plus_adr(str, str, value_offset);
  // Control-dependent in-heap load of the field.
  Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT,
                              IN_HEAP | C2_CONTROL_DEPENDENT_LOAD);
  // String.value field is known to be @Stable.
  if (UseImplicitStableValues) {
    load = cast_array_to_stable(load, value_type);
  }
  return load;
}
3851 
// Load the String.coder byte field of 'str'.  With compact strings disabled
// every string is UTF16-encoded, so the coder is a compile-time constant.
Node* GraphKit::load_String_coder(Node* ctrl, Node* str) {
  if (!CompactStrings) {
    return intcon(java_lang_String::CODER_UTF16);
  }
  int coder_offset = java_lang_String::coder_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
  int coder_field_idx = C->get_alias_index(coder_field_type);
  // Plain unordered byte load from the coder field's alias slice.
  return make_load(ctrl, basic_plus_adr(str, str, coder_offset),
                   TypeInt::BYTE, T_BYTE, coder_field_idx, MemNode::unordered);
}
3864 
// Store 'value' (typed as a byte[] array) into the String.value field of 'str'.
void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
  int value_offset = java_lang_String::value_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* value_field_type = string_type->add_offset(value_offset);
  // Barrier-set-mediated in-heap oop store.
  access_store_at(ctrl, str,  basic_plus_adr(str, value_offset), value_field_type,
                  value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP);
}
3873 
// Store 'value' into the String.coder byte field of 'str'.
void GraphKit::store_String_coder(Node* ctrl, Node* str, Node* value) {
  int coder_offset = java_lang_String::coder_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
  int coder_field_idx = C->get_alias_index(coder_field_type);
  // Plain unordered byte store into the coder field's alias slice.
  store_to_memory(ctrl, basic_plus_adr(str, coder_offset),
                  value, T_BYTE, coder_field_idx, MemNode::unordered);
}
3883 
3884 // Capture src and dst memory state with a MergeMemNode
3885 Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) {
3886   if (src_type == dst_type) {
3887     // Types are equal, we don't need a MergeMemNode
3888     return memory(src_type);
3889   }
3890   MergeMemNode* merge = MergeMemNode::make(map()->memory());
3891   record_for_igvn(merge); // fold it up later, if possible
3892   int src_idx = C->get_alias_index(src_type);
3893   int dst_idx = C->get_alias_index(dst_type);
3894   merge->set_memory_at(src_idx, memory(src_idx));
3895   merge->set_memory_at(dst_idx, memory(dst_idx));
3896   return merge;
3897 }
3898 
3899 Node* GraphKit::compress_string(Node* src, const TypeAryPtr* src_type, Node* dst, Node* count) {
3900   assert(Matcher::match_rule_supported(Op_StrCompressedCopy), "Intrinsic not supported");


3912   // the load to read from memory not containing the result of the StoreB.
3913   // The correct memory graph should look like this:
3914   //  LoadB -> compress_string -> MergeMem(CharMem, StoreB(ByteMem))
3915   Node* mem = capture_memory(src_type, TypeAryPtr::BYTES);
3916   StrCompressedCopyNode* str = new StrCompressedCopyNode(control(), mem, src, dst, count);
3917   Node* res_mem = _gvn.transform(new SCMemProjNode(str));
3918   set_memory(res_mem, TypeAryPtr::BYTES);
3919   return str;
3920 }
3921 
// Emit a StrInflatedCopy intrinsic copying 'count' elements from byte array
// 'src' into 'dst' (which must be a byte[] or char[] slice).
void GraphKit::inflate_string(Node* src, Node* dst, const TypeAryPtr* dst_type, Node* count) {
  assert(Matcher::match_rule_supported(Op_StrInflatedCopy), "Intrinsic not supported");
  assert(dst_type == TypeAryPtr::BYTES || dst_type == TypeAryPtr::CHARS, "invalid dest type");
  // Capture src and dst memory (see comment in 'compress_string').
  Node* mem = capture_memory(TypeAryPtr::BYTES, dst_type);
  StrInflatedCopyNode* str = new StrInflatedCopyNode(control(), mem, src, dst, count);
  // The intrinsic produces the new memory state for the destination slice.
  set_memory(_gvn.transform(str), dst_type);
}
3930 
3931 void GraphKit::inflate_string_slow(Node* src, Node* dst, Node* start, Node* count) {



3932   /**
3933    * int i_char = start;
3934    * for (int i_byte = 0; i_byte < count; i_byte++) {
3935    *   dst[i_char++] = (char)(src[i_byte] & 0xff);
3936    * }
3937    */
3938   add_predicate();
3939   RegionNode* head = new RegionNode(3);
3940   head->init_req(1, control());
3941   gvn().set_type(head, Type::CONTROL);
3942   record_for_igvn(head);
3943 
3944   Node* i_byte = new PhiNode(head, TypeInt::INT);
3945   i_byte->init_req(1, intcon(0));
3946   gvn().set_type(i_byte, TypeInt::INT);
3947   record_for_igvn(i_byte);
3948 
3949   Node* i_char = new PhiNode(head, TypeInt::INT);
3950   i_char->init_req(1, start);
3951   gvn().set_type(i_char, TypeInt::INT);




  26 #include "ci/ciUtilities.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "interpreter/interpreter.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #include "opto/addnode.hpp"
  33 #include "opto/castnode.hpp"
  34 #include "opto/convertnode.hpp"
  35 #include "opto/graphKit.hpp"
  36 #include "opto/idealKit.hpp"
  37 #include "opto/intrinsicnode.hpp"
  38 #include "opto/locknode.hpp"
  39 #include "opto/machnode.hpp"
  40 #include "opto/opaquenode.hpp"
  41 #include "opto/parse.hpp"
  42 #include "opto/rootnode.hpp"
  43 #include "opto/runtime.hpp"
  44 #include "runtime/deoptimization.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 #include "utilities/macros.hpp"
  47 #if INCLUDE_SHENANDOAHGC
  48 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  49 #endif
  50 
//----------------------------GraphKit-----------------------------------------
// Main utility constructor.
GraphKit::GraphKit(JVMState* jvms)
  : Phase(Phase::Parser),
    _env(C->env()),
    _gvn(*C->initial_gvn()),
    _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
{
  // Take ownership of any exception states chained off the incoming map,
  // clearing the map's own list so they are not processed twice.
  _exceptions = jvms->map()->next_exception();
  if (_exceptions != NULL)  jvms->map()->set_next_exception(NULL);
  set_jvms(jvms);
}
  63 
  64 // Private constructor for parser.
  65 GraphKit::GraphKit()
  66   : Phase(Phase::Parser),
  67     _env(C->env()),
  68     _gvn(*C->initial_gvn()),
  69     _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())


 584       break;
 585     case Deoptimization::Reason_class_check:
 586       if (java_bc() == Bytecodes::_aastore) {
 587         ex_obj = env()->ArrayStoreException_instance();
 588       } else {
 589         ex_obj = env()->ClassCastException_instance();
 590       }
 591       break;
 592     default:
 593       break;
 594     }
 595     if (failing()) { stop(); return; }  // exception allocation might fail
 596     if (ex_obj != NULL) {
 597       // Cheat with a preallocated exception object.
 598       if (C->log() != NULL)
 599         C->log()->elem("hot_throw preallocated='1' reason='%s'",
 600                        Deoptimization::trap_reason_name(reason));
 601       const TypeInstPtr* ex_con  = TypeInstPtr::make(ex_obj);
 602       Node*              ex_node = _gvn.transform(ConNode::make(ex_con));
 603 
 604       ex_node = access_resolve_for_write(ex_node);
 605 
 606       // Clear the detail message of the preallocated exception object.
 607       // Weblogic sometimes mutates the detail message of exceptions
 608       // using reflection.
 609       int offset = java_lang_Throwable::get_detailMessage_offset();
 610       const TypePtr* adr_typ = ex_con->add_offset(offset);
 611 
 612       Node *adr = basic_plus_adr(ex_node, ex_node, offset);
 613       const TypeOopPtr* val_type = TypeOopPtr::make_from_klass(env()->String_klass());
 614       Node *store = access_store_at(control(), ex_node, adr, adr_typ, null(), val_type, T_OBJECT, IN_HEAP);
 615 
 616       add_exception_state(make_exception_state(ex_node));
 617       return;
 618     }
 619   }
 620 
 621   // %%% Maybe add entry to OptoRuntime which directly throws the exc.?
 622   // It won't be much cheaper than bailing to the interp., since we'll
 623   // have to pass up all the debug-info, and the runtime will have to
 624   // create the stack trace.
 625 


1670                                      int alias_idx,
1671                                      Node* new_val,
1672                                      const Type* value_type,
1673                                      BasicType bt,
1674                                      DecoratorSet decorators) {
1675   set_control(ctl);
1676   C2AccessValuePtr addr(adr, adr_type);
1677   C2AtomicAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, bt, obj, addr, alias_idx);
1678   if (access.is_raw()) {
1679     return _barrier_set->BarrierSetC2::atomic_add_at(access, new_val, value_type);
1680   } else {
1681     return _barrier_set->atomic_add_at(access, new_val, value_type);
1682   }
1683 }
1684 
// Emit a GC-aware clone of 'src' into 'dst' (of the given 'size'),
// delegating to the active barrier set after installing 'ctl' as control.
void GraphKit::access_clone(Node* ctl, Node* src, Node* dst, Node* size, bool is_array) {
  set_control(ctl);
  return _barrier_set->clone(this, src, dst, size, is_array);
}
1689 
// Ask the active barrier set to resolve oop 'n' for a subsequent read
// (e.g. a GC read barrier); may return 'n' unchanged.
Node* GraphKit::access_resolve_for_read(Node* n) {
  return _barrier_set->resolve_for_read(this, n);
}
1693 
// Ask the active barrier set to resolve oop 'n' for a subsequent write
// (e.g. a GC write barrier); may return 'n' unchanged.
Node* GraphKit::access_resolve_for_write(Node* n) {
  return _barrier_set->resolve_for_write(this, n);
}
1697 
1698 
//-------------------------array_element_address-------------------------
// Compute the address of element ary[idx] for element type 'elembt'.
Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
                                      const TypeInt* sizetype, Node* ctrl) {
  uint shift  = exact_log2(type2aelembytes(elembt));        // log2(element size)
  uint header = arrayOopDesc::base_offset_in_bytes(elembt); // array header size

  // short-circuit a common case (saves lots of confusing waste motion)
  jint idx_con = find_int_con(idx, -1);
  if (idx_con >= 0) {
    intptr_t offset = header + ((intptr_t)idx_con << shift);
    return basic_plus_adr(ary, offset);
  }

  // must be correct type for alignment purposes
  Node* base  = basic_plus_adr(ary, header);
  // Widen the int index to the machine-word (X) type before scaling.
  idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
  Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
  return basic_plus_adr(ary, base, scale);
}
1718 


3216   } else {
3217     set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)),alias_idx);
3218   }
3219   return membar;
3220 }
3221 
3222 //------------------------------shared_lock------------------------------------
3223 // Emit locking code.
3224 FastLockNode* GraphKit::shared_lock(Node* obj) {
3225   // bci is either a monitorenter bc or InvocationEntryBci
3226   // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3227   assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3228 
3229   if( !GenerateSynchronizationCode )
3230     return NULL;                // Not locking things?
3231   if (stopped())                // Dead monitor?
3232     return NULL;
3233 
3234   assert(dead_locals_are_killed(), "should kill locals before sync. point");
3235 
3236   obj = access_resolve_for_write(obj);
3237 
3238   // Box the stack location
3239   Node* box = _gvn.transform(new BoxLockNode(next_monitor()));
3240   Node* mem = reset_memory();
3241 
3242   FastLockNode * flock = _gvn.transform(new FastLockNode(0, obj, box) )->as_FastLock();
3243   if (UseBiasedLocking && PrintPreciseBiasedLockingStatistics) {
3244     // Create the counters for this fast lock.
3245     flock->create_lock_counter(sync_jvms()); // sync_jvms used to get current bci
3246   }
3247 
3248   // Create the rtm counters for this fast lock if needed.
3249   flock->create_rtm_lock_counter(sync_jvms()); // sync_jvms used to get current bci
3250 
3251   // Add monitor to debug info for the slow path.  If we block inside the
3252   // slow path and de-opt, we need the monitor hanging around
3253   map()->push_monitor( flock );
3254 
3255   const TypeFunc *tf = LockNode::lock_type();
3256   LockNode *lock = new LockNode(C, tf);
3257 


3713     Node* ccast = alloc->make_ideal_length(ary_type, &_gvn);
3714     if (ccast != length) {
3715       _gvn.set_type_bottom(ccast);
3716       record_for_igvn(ccast);
3717       replace_in_map(length, ccast);
3718     }
3719   }
3720 
3721   return javaoop;
3722 }
3723 
3724 // The following "Ideal_foo" functions are placed here because they recognize
3725 // the graph shapes created by the functions immediately above.
3726 
//---------------------------Ideal_allocation----------------------------------
// Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
  if (ptr == NULL) {     // reduce dumb test in callers
    return NULL;
  }

#if INCLUDE_SHENANDOAHGC
  // Attempt to see through Shenandoah barriers.
  ptr = ShenandoahBarrierNode::skip_through_barrier(ptr);
#endif

  if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast
    ptr = ptr->in(1);
    if (ptr == NULL) return NULL;
  }
  // Return NULL for allocations with several casts:
  //   j.l.reflect.Array.newInstance(jobject, jint)
  //   Object.clone()
  // to keep more precise type from last cast.
  if (ptr->is_Proj()) {
    Node* allo = ptr->in(0);
    if (allo != NULL && allo->is_Allocate()) {
      return allo->as_Allocate();
    }
  }
  // Report failure to match.
  return NULL;
}
3756 
3757 // Fancy version which also strips off an offset (and reports it to caller).
3758 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase,


// Flush the finished IdealKit state back into this GraphKit.
void GraphKit::final_sync(IdealKit& ideal) {
  // Final sync IdealKit and graphKit.
  sync_kit(ideal);
}
3849 
// Load the logical character length of a String: the backing array length
// shifted right by the coder value (presumably 0 for LATIN1, 1 for UTF16 --
// verify against java_lang_String's coder constants).
Node* GraphKit::load_String_length(Node* ctrl, Node* str) {
  Node* len = load_array_length(load_String_value(ctrl, str));
  Node* coder = load_String_coder(ctrl, str);
  // Divide length by 2 if coder is UTF16
  return _gvn.transform(new RShiftINode(len, coder));
}
3856 
// Load the String.value field (a non-null byte[] array) of 'str'.
Node* GraphKit::load_String_value(Node* ctrl, Node* str) {
  int value_offset = java_lang_String::value_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* value_field_type = string_type->add_offset(value_offset);
  // Result type: non-null byte[] of non-negative length.
  const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
                                                  TypeAry::make(TypeInt::BYTE, TypeInt::POS),
                                                  ciTypeArrayKlass::make(T_BYTE), true, 0);

#if INCLUDE_SHENANDOAHGC
  // Resolve 'str' through the barrier set (presumably a Shenandoah read
  // barrier) unless final-field reads are being optimized -- see
  // ShenandoahOptimizeInstanceFinals.
  if (!ShenandoahOptimizeInstanceFinals) {
    str = access_resolve_for_read(str);
  }
#endif

  Node* p = basic_plus_adr(str, str, value_offset);
  // Control-dependent in-heap load of the field.
  Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT,
                              IN_HEAP | C2_CONTROL_DEPENDENT_LOAD);
  // String.value field is known to be @Stable.
  if (UseImplicitStableValues) {
    load = cast_array_to_stable(load, value_type);
  }
  return load;
}
3881 
// Load the String.coder byte field of 'str'.  With compact strings disabled
// every string is UTF16-encoded, so the coder is a compile-time constant.
Node* GraphKit::load_String_coder(Node* ctrl, Node* str) {
  if (!CompactStrings) {
    return intcon(java_lang_String::CODER_UTF16);
  }
  int coder_offset = java_lang_String::coder_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
  int coder_field_idx = C->get_alias_index(coder_field_type);

#if INCLUDE_SHENANDOAHGC
  // Resolve 'str' through the barrier set for reading unless final-field
  // reads are being optimized -- see ShenandoahOptimizeInstanceFinals.
  if (!ShenandoahOptimizeInstanceFinals) {
    str = access_resolve_for_read(str);
  }
#endif

  // Plain unordered byte load from the coder field's alias slice.
  return make_load(ctrl, basic_plus_adr(str, str, coder_offset),
                   TypeInt::BYTE, T_BYTE, coder_field_idx, MemNode::unordered);
}
3901 
// Store 'value' (typed as a byte[] array) into the String.value field of 'str'.
void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
  int value_offset = java_lang_String::value_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* value_field_type = string_type->add_offset(value_offset);

  // Resolve 'str' for writing through the barrier set before the store.
  str = access_resolve_for_write(str);

  // Barrier-set-mediated in-heap oop store.
  access_store_at(ctrl, str,  basic_plus_adr(str, value_offset), value_field_type,
                  value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP);
}
3913 
// Store 'value' into the String.coder byte field of 'str'.
void GraphKit::store_String_coder(Node* ctrl, Node* str, Node* value) {
  int coder_offset = java_lang_String::coder_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);

  // Resolve 'str' for writing through the barrier set before the store.
  str = access_resolve_for_write(str);

  const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
  int coder_field_idx = C->get_alias_index(coder_field_type);
  // Plain unordered byte store under the current control input; note the
  // store uses control() rather than the incoming 'ctrl'.
  store_to_memory(control(), basic_plus_adr(str, coder_offset),
                  value, T_BYTE, coder_field_idx, MemNode::unordered);
}
3926 
// Capture src and dst memory state with a MergeMemNode
Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) {
  if (src_type == dst_type) {
    // Types are equal, we don't need a MergeMemNode
    return memory(src_type);
  }
  // Merge the two relevant alias slices on top of the map's memory state.
  MergeMemNode* merge = MergeMemNode::make(map()->memory());
  record_for_igvn(merge); // fold it up later, if possible
  int src_idx = C->get_alias_index(src_type);
  int dst_idx = C->get_alias_index(dst_type);
  merge->set_memory_at(src_idx, memory(src_idx));
  merge->set_memory_at(dst_idx, memory(dst_idx));
  return merge;
}
3941 
3942 Node* GraphKit::compress_string(Node* src, const TypeAryPtr* src_type, Node* dst, Node* count) {
3943   assert(Matcher::match_rule_supported(Op_StrCompressedCopy), "Intrinsic not supported");


3955   // the load to read from memory not containing the result of the StoreB.
3956   // The correct memory graph should look like this:
3957   //  LoadB -> compress_string -> MergeMem(CharMem, StoreB(ByteMem))
3958   Node* mem = capture_memory(src_type, TypeAryPtr::BYTES);
3959   StrCompressedCopyNode* str = new StrCompressedCopyNode(control(), mem, src, dst, count);
3960   Node* res_mem = _gvn.transform(new SCMemProjNode(str));
3961   set_memory(res_mem, TypeAryPtr::BYTES);
3962   return str;
3963 }
3964 
// Emit a StrInflatedCopy intrinsic copying 'count' elements from byte array
// 'src' into 'dst' (which must be a byte[] or char[] slice).
void GraphKit::inflate_string(Node* src, Node* dst, const TypeAryPtr* dst_type, Node* count) {
  assert(Matcher::match_rule_supported(Op_StrInflatedCopy), "Intrinsic not supported");
  assert(dst_type == TypeAryPtr::BYTES || dst_type == TypeAryPtr::CHARS, "invalid dest type");
  // Capture src and dst memory (see comment in 'compress_string').
  Node* mem = capture_memory(TypeAryPtr::BYTES, dst_type);
  StrInflatedCopyNode* str = new StrInflatedCopyNode(control(), mem, src, dst, count);
  // The intrinsic produces the new memory state for the destination slice.
  set_memory(_gvn.transform(str), dst_type);
}
3973 
3974 void GraphKit::inflate_string_slow(Node* src, Node* dst, Node* start, Node* count) {
3975   src = access_resolve_for_read(src);
3976   dst = access_resolve_for_write(dst);
3977 
3978   /**
3979    * int i_char = start;
3980    * for (int i_byte = 0; i_byte < count; i_byte++) {
3981    *   dst[i_char++] = (char)(src[i_byte] & 0xff);
3982    * }
3983    */
3984   add_predicate();
3985   RegionNode* head = new RegionNode(3);
3986   head->init_req(1, control());
3987   gvn().set_type(head, Type::CONTROL);
3988   record_for_igvn(head);
3989 
3990   Node* i_byte = new PhiNode(head, TypeInt::INT);
3991   i_byte->init_req(1, intcon(0));
3992   gvn().set_type(i_byte, TypeInt::INT);
3993   record_for_igvn(i_byte);
3994 
3995   Node* i_char = new PhiNode(head, TypeInt::INT);
3996   i_char->init_req(1, start);
3997   gvn().set_type(i_char, TypeInt::INT);


< prev index next >