src/share/vm/opto/library_call.cpp

  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "classfile/vmSymbols.hpp"
  28 #include "compiler/compileBroker.hpp"
  29 #include "compiler/compileLog.hpp"
  30 #include "oops/objArrayKlass.hpp"
  31 #include "opto/addnode.hpp"
  32 #include "opto/callGenerator.hpp"
  33 #include "opto/cfgnode.hpp"
  34 #include "opto/connode.hpp"
  35 #include "opto/idealKit.hpp"
  36 #include "opto/mathexactnode.hpp"
  37 #include "opto/mulnode.hpp"
  38 #include "opto/parse.hpp"
  39 #include "opto/runtime.hpp"
  40 #include "opto/subnode.hpp"
  41 #include "prims/nativeLookup.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "trace/traceMacros.hpp"

  44 
  45 class LibraryIntrinsic : public InlineCallGenerator {
  46   // Extend the set of intrinsics known to the runtime:
  47  public:
  48  private:
  49   bool             _is_virtual;
  50   bool             _does_virtual_dispatch;
  51   int8_t           _predicates_count;  // Intrinsic is predicated by several conditions
  52   int8_t           _last_predicate; // Last generated predicate
  53   vmIntrinsics::ID _intrinsic_id;
  54 
  55  public:
  56   LibraryIntrinsic(ciMethod* m, bool is_virtual, int predicates_count, bool does_virtual_dispatch, vmIntrinsics::ID id)
  57     : InlineCallGenerator(m),
  58       _is_virtual(is_virtual),
  59       _does_virtual_dispatch(does_virtual_dispatch),
  60       _predicates_count((int8_t)predicates_count),
  61       _last_predicate((int8_t)-1),
  62       _intrinsic_id(id)
  63   {


2412   case vmIntrinsics::_reverseBytes_l:           n = new (C) ReverseBytesLNode( 0,   arg);  break;
2413   default:  fatal_unexpected_iid(id);  break;
2414   }
2415   set_result(_gvn.transform(n));
2416   return true;
2417 }
2418 
2419 //----------------------------inline_unsafe_access----------------------------
2420 
2421 const static BasicType T_ADDRESS_HOLDER = T_LONG;
2422 
2423 // Helper that guards and inserts a pre-barrier.
2424 void LibraryCallKit::insert_pre_barrier(Node* base_oop, Node* offset,
2425                                         Node* pre_val, bool need_mem_bar) {
2426   // We could be accessing the referent field of a reference object. If so, when G1
2427   // is enabled, we need to log the value in the referent field in an SATB buffer.
2428   // This routine performs some compile time filters and generates suitable
2429   // runtime filters that guard the pre-barrier code.
2430   // Also add memory barrier for non volatile load from the referent field
2431   // to prevent commoning of loads across safepoint.
2432   if (!UseG1GC && !need_mem_bar)
2433     return;
2434 
2435   // Some compile time checks.
2436 
2437   // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
2438   const TypeX* otype = offset->find_intptr_t_type();
2439   if (otype != NULL && otype->is_con() &&
2440       otype->get_con() != java_lang_ref_Reference::referent_offset) {
2441     // Constant offset but not the reference_offset so just return
2442     return;
2443   }
2444 
2445   // We only need to generate the runtime guards for instances.
2446   const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
2447   if (btype != NULL) {
2448     if (btype->isa_aryptr()) {
2449       // Array type so nothing to do
2450       return;
2451     }
2452 


2669     }
2670     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2671       // Don't intrinsify mismatched object accesses
2672       return false;
2673     }
2674     mismatched = (bt != type);
2675   }
2676 
2677   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2678 
2679   // First guess at the value type.
2680   const Type *value_type = Type::get_const_basic_type(type);
2681 
2682   // We will need memory barriers unless we can determine a unique
2683   // alias category for this reference.  (Note:  If for some reason
2684   // the barriers get omitted and the unsafe reference begins to "pollute"
2685   // the alias analysis of the rest of the graph, either Compile::can_alias
2686   // or Compile::must_alias will throw a diagnostic assert.)
2687   bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
2688 

2689   // If we are reading the value of the referent field of a Reference
2690   // object (either by using Unsafe directly or through reflection)
2691   // then, if G1 is enabled, we need to record the referent in an
2692   // SATB log buffer using the pre-barrier mechanism.
2693   // Also we need to add memory barrier to prevent commoning reads
2694   // from this field across safepoint since GC can change its value.
2695   bool need_read_barrier = !is_native_ptr && !is_store &&
2696                            offset != top() && heap_base_oop != top();
2697 
2698   if (!is_store && type == T_OBJECT) {
2699     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type, is_native_ptr);
2700     if (tjp != NULL) {
2701       value_type = tjp;
2702     }
2703   }
2704 
2705   receiver = null_check(receiver);
2706   if (stopped()) {
2707     return true;
2708   }
2709   // Heap pointers get a null-check from the interpreter,
2710   // as a courtesy.  However, this is not guaranteed by Unsafe,
2711   // and it is not possible to fully distinguish unintended nulls
2712   // from intended ones in this API.
2713 



2714   if (is_volatile) {
2715     // We need to emit leading and trailing CPU membars (see below) in
2716     // addition to memory membars when is_volatile. This is a little
2717     // too strong, but avoids the need to insert per-alias-type
2718     // volatile membars (for stores; compare Parse::do_put_xxx), which
2719     // we cannot do effectively here because we probably only have a
2720     // rough approximation of type.
2721     need_mem_bar = true;
2722     // For Stores, place a memory ordering barrier now.
2723     if (is_store) {
2724       insert_mem_bar(Op_MemBarRelease);
2725     } else {
2726       if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2727         insert_mem_bar(Op_MemBarVolatile);
2728       }
2729     }
2730   }
2731 
2732   // Memory barrier to prevent normal and 'unsafe' accesses from
2733   // bypassing each other.  Happens after null checks, so the
2734   // exception paths do not take memory state from the memory barrier,
2735   // so there's no problems making a strong assert about mixing users
2736   // of safe & unsafe memory.  Otherwise fails in a CTW of rt.jar
2737   // around 5701, class sun/reflect/UnsafeBooleanFieldAccessorImpl.
2738   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2739 
2740   if (!is_store) {
2741     MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
2742     // To be valid, unsafe loads may depend on other conditions than
2743     // the one that guards them: pin the Load node
2744     Node* p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched);

2745     // load value
2746     switch (type) {
2747     case T_BOOLEAN:
2748     case T_CHAR:
2749     case T_BYTE:
2750     case T_SHORT:
2751     case T_INT:
2752     case T_LONG:
2753     case T_FLOAT:
2754     case T_DOUBLE:
2755       break;
2756     case T_OBJECT:
2757       if (need_read_barrier) {
2758         insert_pre_barrier(heap_base_oop, offset, p, !(is_volatile || need_mem_bar));
2759       }
2760       break;
2761     case T_ADDRESS:
2762       // Cast to an int type.
2763       p = _gvn.transform(new (C) CastP2XNode(NULL, p));
2764       p = ConvX2UL(p);
2765       break;
2766     default:
2767       fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
2768       break;
2769     }
2770     // The load node has the control of the preceding MemBarCPUOrder.  All
2771     // following nodes will have the control of the MemBarCPUOrder inserted at
2772     // the end of this method.  So, pushing the load onto the stack at a later
2773     // point is fine.
2774     set_result(p);
2775   } else {
2776     // place effect of store into memory
2777     switch (type) {
2778     case T_DOUBLE:
2779       val = dstore_rounding(val);
2780       break;
2781     case T_ADDRESS:
2782       // Repackage the long as a pointer.
2783       val = ConvL2X(val);
2784       val = _gvn.transform(new (C) CastX2PNode(val));
2785       break;
2786     }
2787 
2788     MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
2789     if (type == T_OBJECT ) {
2790       (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched);
2791     } else {
2792       (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile, unaligned, mismatched);
2793     }
2794   }
2795 
2796   if (is_volatile) {
2797     if (!is_store) {
2798       insert_mem_bar(Op_MemBarAcquire);

2799     } else {
2800       if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2801         insert_mem_bar(Op_MemBarVolatile);

2802       }
2803     }
2804   }
2805 
2806   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2807 
2808   return true;
2809 }
2810 
2811 //----------------------------inline_unsafe_prefetch----------------------------
2812 
2813 bool LibraryCallKit::inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static) {
2814 #ifndef PRODUCT
2815   {
2816     ResourceMark rm;
2817     // Check the signatures.
2818     ciSignature* sig = callee()->signature();
2819 #ifdef ASSERT
2820     // Object getObject(Object base, int/long offset), etc.
2821     BasicType rtype = sig->return_type()->basic_type();


2981   if (kind == LS_xchg && type == T_OBJECT) {
2982     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2983     if (tjp != NULL) {
2984       value_type = tjp;
2985     }
2986   }
2987 
2988   // Null check receiver.
2989   receiver = null_check(receiver);
2990   if (stopped()) {
2991     return true;
2992   }
2993 
2994   int alias_idx = C->get_alias_index(adr_type);
2995 
2996   // Memory-model-wise, a LoadStore acts like a little synchronized
2997   // block, so needs barriers on each side.  These don't translate
2998   // into actual barriers on most machines, but we still need rest of
2999   // compiler to respect ordering.
3000 
3001   insert_mem_bar(Op_MemBarRelease);
3002   insert_mem_bar(Op_MemBarCPUOrder);
3003 
3004   // 4984716: MemBars must be inserted before this
3005   //          memory node in order to avoid a false
3006   //          dependency which will confuse the scheduler.
3007   Node *mem = memory(alias_idx);
3008 
3009   // For now, we handle only those cases that actually exist: ints,
3010   // longs, and Object. Adding others should be straightforward.
3011   Node* load_store = NULL;
3012   switch(type) {
3013   case T_INT:
3014     if (kind == LS_xadd) {
3015       load_store = _gvn.transform(new (C) GetAndAddINode(control(), mem, adr, newval, adr_type));
3016     } else if (kind == LS_xchg) {
3017       load_store = _gvn.transform(new (C) GetAndSetINode(control(), mem, adr, newval, adr_type));
3018     } else if (kind == LS_cmpxchg) {
3019       load_store = _gvn.transform(new (C) CompareAndSwapINode(control(), mem, adr, newval, oldval));
3020     } else {
3021       ShouldNotReachHere();


3080       if (kind == LS_xchg) {
3081         load_store = _gvn.transform(new (C) GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
3082       } else {
3083         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
3084         load_store = _gvn.transform(new (C) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
3085       }
3086     }
3087     post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
3088     break;
3089   default:
3090     fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
3091     break;
3092   }
3093 
3094   // SCMemProjNodes represent the memory state of a LoadStore. Their
3095   // main role is to prevent LoadStore nodes from being optimized away
3096   // when their results aren't used.
3097   Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
3098   set_memory(proj, alias_idx);
3099 


3100   if (type == T_OBJECT && kind == LS_xchg) {
3101 #ifdef _LP64
3102     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
3103       load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
3104     }
3105 #endif

3106     if (can_move_pre_barrier()) {
3107       // Don't need to load pre_val. The old value is returned by load_store.
3108       // The pre_barrier can execute after the xchg as long as no safepoint
3109       // gets inserted between them.
3110       pre_barrier(false /* do_load */,
3111                   control(), NULL, NULL, max_juint, NULL, NULL,
3112                   load_store /* pre_val */,
3113                   T_OBJECT);
3114     }
3115   }
3116 
3117   // Add the trailing membar surrounding the access
3118   insert_mem_bar(Op_MemBarCPUOrder);
3119   insert_mem_bar(Op_MemBarAcquire);

3120 
3121   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
3122   set_result(load_store);
3123   return true;
3124 }
3125 
3126 //----------------------------inline_unsafe_ordered_store----------------------
3127 // public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
3128 // public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x);
3129 // public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x);
3130 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
3131   // This is another variant of inline_unsafe_access, differing in
3132   // that it always issues store-store ("release") barrier and ensures
3133   // store-atomicity (which only matters for "long").
3134 
3135   if (callee()->is_static())  return false;  // caller must have the capability!
3136 
3137 #ifndef PRODUCT
3138   {
3139     ResourceMark rm;


4495   // 16 - 64-bit VM, normal klass
4496   if (base_off % BytesPerLong != 0) {
4497     assert(UseCompressedClassPointers, "");
4498     if (is_array) {
4499       // Exclude length to copy by 8 bytes words.
4500       base_off += sizeof(int);
4501     } else {
4502       // Include klass to copy by 8 bytes words.
4503       base_off = instanceOopDesc::klass_offset_in_bytes();
4504     }
4505     assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
4506   }
4507   src  = basic_plus_adr(src,  base_off);
4508   dest = basic_plus_adr(dest, base_off);
4509 
4510   // Compute the length also, if needed:
4511   Node* countx = size;
4512   countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(base_off)));
4513   countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong) ));
4514 

4515   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4516   bool disjoint_bases = true;
4517   generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
4518                                src, NULL, dest, NULL, countx,
4519                                /*dest_uninitialized*/true);
4520 
4521   // If necessary, emit some card marks afterwards.  (Non-arrays only.)
4522   if (card_mark) {
4523     assert(!is_array, "");
4524     // Put in store barrier for any and all oops we are sticking
4525     // into this object.  (We could avoid this if we could prove
4526     // that the object type contains no oop fields at all.)
4527     Node* no_particular_value = NULL;
4528     Node* no_particular_field = NULL;
4529     int raw_adr_idx = Compile::AliasIdxRaw;
4530     post_barrier(control(),
4531                  memory(raw_adr_type),
4532                  alloc_obj,
4533                  no_particular_field,
4534                  raw_adr_idx,


5231       PreserveJVMState pjvms(this);
5232       set_control(not_subtype_ctrl);
5233       // (At this point we can assume disjoint_bases, since types differ.)
5234       int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
5235       Node* p1 = basic_plus_adr(dest_klass, ek_offset);
5236       Node* n1 = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p1, TypeRawPtr::BOTTOM);
5237       Node* dest_elem_klass = _gvn.transform(n1);
5238       Node* cv = generate_checkcast_arraycopy(adr_type,
5239                                               dest_elem_klass,
5240                                               src, src_offset, dest, dest_offset,
5241                                               ConvI2X(copy_length), dest_uninitialized);
5242       if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
5243       checked_control = control();
5244       checked_i_o     = i_o();
5245       checked_mem     = memory(adr_type);
5246       checked_value   = cv;
5247     }
5248     // At this point we know we do not need type checks on oop stores.
5249 
5250     // Let's see if we need card marks:
5251     if (alloc != NULL && use_ReduceInitialCardMarks()) {
5252       // If we do not need card marks, copy using the jint or jlong stub.
5253       copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
5254       assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
5255              "sizes agree");
5256     }
5257   }
5258 
5259   if (!stopped()) {
5260     // Generate the fast path, if possible.
5261     PreserveJVMState pjvms(this);
5262     generate_unchecked_arraycopy(adr_type, copy_type, disjoint_bases,
5263                                  src, src_offset, dest, dest_offset,
5264                                  ConvI2X(copy_length), dest_uninitialized);
5265 
5266     // Present the results of the fast call.
5267     result_region->init_req(fast_path, control());
5268     result_i_o   ->init_req(fast_path, i_o());
5269     result_memory->init_req(fast_path, memory(adr_type));
5270   }
5271 


6265 }
6266 
6267 //----------------------------inline_reference_get----------------------------
6268 // public T java.lang.ref.Reference.get();
6269 bool LibraryCallKit::inline_reference_get() {
6270   const int referent_offset = java_lang_ref_Reference::referent_offset;
6271   guarantee(referent_offset > 0, "should have already been set");
6272 
6273   // Get the argument:
6274   Node* reference_obj = null_check_receiver();
6275   if (stopped()) return true;
6276 
6277   Node* adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);
6278 
6279   ciInstanceKlass* klass = env()->Object_klass();
6280   const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
6281 
6282   Node* no_ctrl = NULL;
6283   Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT, MemNode::unordered);
6284 

6285   // Use the pre-barrier to record the value in the referent field
6286   pre_barrier(false /* do_load */,
6287               control(),
6288               NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
6289               result /* pre_val */,
6290               T_OBJECT);
6291 
6292   // Add memory barrier to prevent commoning reads from this field
6293   // across safepoint since GC can change its value.
6294   insert_mem_bar(Op_MemBarCPUOrder);
6295 
6296   set_result(result);
6297   return true;
6298 }
6299 
6300 
6301 Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
6302                                               bool is_exact=true, bool is_static=false) {
6303 
6304   const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();


6314 
6315   // Next code  copied from Parse::do_get_xxx():
6316 
6317   // Compute address and memory type.
6318   int offset  = field->offset_in_bytes();
6319   bool is_vol = field->is_volatile();
6320   ciType* field_klass = field->type();
6321   assert(field_klass->is_loaded(), "should be loaded");
6322   const TypePtr* adr_type = C->alias_type(field)->adr_type();
6323   Node *adr = basic_plus_adr(fromObj, fromObj, offset);
6324   BasicType bt = field->layout_type();
6325 
6326   // Build the resultant type of the load
6327   const Type *type;
6328   if (bt == T_OBJECT) {
6329     type = TypeOopPtr::make_from_klass(field_klass->as_klass());
6330   } else {
6331     type = Type::get_const_basic_type(bt);
6332   }
6333 

6334   if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
6335     insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
6336   }
6337   // Build the load.
6338   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
6339   Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);

6340   // If reference is volatile, prevent following memory ops from
6341   // floating up past the volatile read.  Also prevents commoning
6342   // another volatile read.
6343   if (is_vol) {
6344     // Memory barrier includes bogus read of value to force load BEFORE membar
6345     insert_mem_bar(Op_MemBarAcquire, loadedField);

6346   }
6347   return loadedField;
6348 }
6349 
6350 
6351 //------------------------------inline_aescrypt_Block-----------------------
6352 bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
6353   address stubAddr = NULL;
6354   const char *stubName;
6355   assert(UseAES, "need AES instruction support");
6356 
6357   switch(id) {
6358   case vmIntrinsics::_aescrypt_encryptBlock:
6359     stubAddr = StubRoutines::aescrypt_encryptBlock();
6360     stubName = "aescrypt_encryptBlock";
6361     break;
6362   case vmIntrinsics::_aescrypt_decryptBlock:
6363     stubAddr = StubRoutines::aescrypt_decryptBlock();
6364     stubName = "aescrypt_decryptBlock";
6365     break;




  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "classfile/vmSymbols.hpp"
  28 #include "compiler/compileBroker.hpp"
  29 #include "compiler/compileLog.hpp"
  30 #include "oops/objArrayKlass.hpp"
  31 #include "opto/addnode.hpp"
  32 #include "opto/callGenerator.hpp"
  33 #include "opto/cfgnode.hpp"
  34 #include "opto/connode.hpp"
  35 #include "opto/idealKit.hpp"
  36 #include "opto/mathexactnode.hpp"
  37 #include "opto/mulnode.hpp"
  38 #include "opto/parse.hpp"
  39 #include "opto/runtime.hpp"
  40 #include "opto/subnode.hpp"
  41 #include "prims/nativeLookup.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "trace/traceMacros.hpp"
  44 #include "utilities/macros.hpp"
  45 #if INCLUDE_ALL_GCS
  46 #include "gc_implementation/shenandoah/shenandoahBarrierSetC2.hpp"
  47 #include "gc_implementation/shenandoah/shenandoahRuntime.hpp"
  48 #include "gc_implementation/shenandoah/shenandoahSupport.hpp"
  49 #endif
  50 
  51 
  52 class LibraryIntrinsic : public InlineCallGenerator {
  53   // Extend the set of intrinsics known to the runtime:
  54  public:
  55  private:
  56   bool             _is_virtual;
  57   bool             _does_virtual_dispatch;
  58   int8_t           _predicates_count;  // Intrinsic is predicated by several conditions
  59   int8_t           _last_predicate; // Last generated predicate
  60   vmIntrinsics::ID _intrinsic_id;
  61 
  62  public:
  63   LibraryIntrinsic(ciMethod* m, bool is_virtual, int predicates_count, bool does_virtual_dispatch, vmIntrinsics::ID id)
  64     : InlineCallGenerator(m),
  65       _is_virtual(is_virtual),
  66       _does_virtual_dispatch(does_virtual_dispatch),
  67       _predicates_count((int8_t)predicates_count),
  68       _last_predicate((int8_t)-1),
  69       _intrinsic_id(id)
  70   {


2419   case vmIntrinsics::_reverseBytes_l:           n = new (C) ReverseBytesLNode( 0,   arg);  break;
2420   default:  fatal_unexpected_iid(id);  break;
2421   }
2422   set_result(_gvn.transform(n));
2423   return true;
2424 }
2425 
2426 //----------------------------inline_unsafe_access----------------------------
2427 
2428 const static BasicType T_ADDRESS_HOLDER = T_LONG;
2429 
2430 // Helper that guards and inserts a pre-barrier.
2431 void LibraryCallKit::insert_pre_barrier(Node* base_oop, Node* offset,
2432                                         Node* pre_val, bool need_mem_bar) {
2433   // We could be accessing the referent field of a reference object. If so, when G1
2434   // is enabled, we need to log the value in the referent field in an SATB buffer.
2435   // This routine performs some compile time filters and generates suitable
2436   // runtime filters that guard the pre-barrier code.
2437   // Also add memory barrier for non volatile load from the referent field
2438   // to prevent commoning of loads across safepoint.
2439   if (!(UseG1GC || UseShenandoahGC) && !need_mem_bar)
2440     return;
2441 
2442   // Some compile time checks.
2443 
2444   // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
2445   const TypeX* otype = offset->find_intptr_t_type();
2446   if (otype != NULL && otype->is_con() &&
2447       otype->get_con() != java_lang_ref_Reference::referent_offset) {
2448     // Constant offset but not the reference_offset so just return
2449     return;
2450   }
2451 
2452   // We only need to generate the runtime guards for instances.
2453   const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
2454   if (btype != NULL) {
2455     if (btype->isa_aryptr()) {
2456       // Array type so nothing to do
2457       return;
2458     }
2459 
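
For orientation, here is a minimal standalone sketch (not HotSpot code) of the compile-time filtering that insert_pre_barrier() performs above: the SATB pre-barrier is only generated when the access could actually be Reference.referent on an instance. All names below are illustrative.

    #include <cstdint>

    // Illustrative only: mirrors the compile-time filters above. A constant
    // offset other than Reference.referent, or an array base, never needs the
    // referent pre-barrier, so no runtime guards are generated for them.
    static bool may_need_referent_pre_barrier(bool offset_is_constant,
                                              std::intptr_t constant_offset,
                                              std::intptr_t referent_offset,
                                              bool base_is_array) {
      if (offset_is_constant && constant_offset != referent_offset) {
        return false;   // constant offset, but not the referent field
      }
      if (base_is_array) {
        return false;   // arrays have no referent field
      }
      return true;      // otherwise emit runtime guards + SATB pre-barrier
    }
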


2676     }
2677     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2678       // Don't intrinsify mismatched object accesses
2679       return false;
2680     }
2681     mismatched = (bt != type);
2682   }
2683 
2684   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2685 
2686   // First guess at the value type.
2687   const Type *value_type = Type::get_const_basic_type(type);
2688 
2689   // We will need memory barriers unless we can determine a unique
2690   // alias category for this reference.  (Note:  If for some reason
2691   // the barriers get omitted and the unsafe reference begins to "pollute"
2692   // the alias analysis of the rest of the graph, either Compile::can_alias
2693   // or Compile::must_alias will throw a diagnostic assert.)
2694   bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
2695 
2696 #if INCLUDE_ALL_GCS
2697   // Work around JDK-8220714 bug. This is done for Shenandoah only, until
2698   // the shared code fix is upstreamed and properly tested there.
2699   if (UseShenandoahGC) {
2700     need_mem_bar |= is_native_ptr;
2701   }
2702 #endif
2703 
2704   // If we are reading the value of the referent field of a Reference
2705   // object (either by using Unsafe directly or through reflection)
2706   // then, if G1 is enabled, we need to record the referent in an
2707   // SATB log buffer using the pre-barrier mechanism.
2708   // Also we need to add memory barrier to prevent commoning reads
2709   // from this field across safepoint since GC can change its value.
2710   bool need_read_barrier = !is_native_ptr && !is_store &&
2711                            offset != top() && heap_base_oop != top();
2712 
2713   if (!is_store && type == T_OBJECT) {
2714     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type, is_native_ptr);
2715     if (tjp != NULL) {
2716       value_type = tjp;
2717     }
2718   }
2719 
2720   receiver = null_check(receiver);
2721   if (stopped()) {
2722     return true;
2723   }
2724   // Heap pointers get a null-check from the interpreter,
2725   // as a courtesy.  However, this is not guaranteed by Unsafe,
2726   // and it is not possible to fully distinguish unintended nulls
2727   // from intended ones in this API.
2728 
2729   Node* load = NULL;
2730   Node* store = NULL;
2731   Node* leading_membar = NULL;
2732   if (is_volatile) {
2733     // We need to emit leading and trailing CPU membars (see below) in
2734     // addition to memory membars when is_volatile. This is a little
2735     // too strong, but avoids the need to insert per-alias-type
2736     // volatile membars (for stores; compare Parse::do_put_xxx), which
2737     // we cannot do effectively here because we probably only have a
2738     // rough approximation of type.
2739     need_mem_bar = true;
2740     // For Stores, place a memory ordering barrier now.
2741     if (is_store) {
2742       leading_membar = insert_mem_bar(Op_MemBarRelease);
2743     } else {
2744       if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2745         leading_membar = insert_mem_bar(Op_MemBarVolatile);
2746       }
2747     }
2748   }
2749 
2750   // Memory barrier to prevent normal and 'unsafe' accesses from
2751   // bypassing each other.  Happens after null checks, so the
2752   // exception paths do not take memory state from the memory barrier,
2753   // so there's no problems making a strong assert about mixing users
2754   // of safe & unsafe memory.  Otherwise fails in a CTW of rt.jar
2755   // around 5701, class sun/reflect/UnsafeBooleanFieldAccessorImpl.
2756   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2757 
2758   if (!is_store) {
2759     MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
2760     // To be valid, unsafe loads may depend on other conditions than
2761     // the one that guards them: pin the Load node
2762     load = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched);
2763 #if INCLUDE_ALL_GCS
2764     if (UseShenandoahGC && (type == T_OBJECT || type == T_ARRAY)) {
2765       load = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, load);
2766     }
2767 #endif
2768     // load value
2769     switch (type) {
2770     case T_BOOLEAN:
2771     case T_CHAR:
2772     case T_BYTE:
2773     case T_SHORT:
2774     case T_INT:
2775     case T_LONG:
2776     case T_FLOAT:
2777     case T_DOUBLE:
2778       break;
2779     case T_OBJECT:
2780       if (need_read_barrier) {
2781         insert_pre_barrier(heap_base_oop, offset, load, !(is_volatile || need_mem_bar));
2782       }
2783       break;
2784     case T_ADDRESS:
2785       // Cast to an int type.
2786       load = _gvn.transform(new (C) CastP2XNode(NULL, load));
2787       load = ConvX2UL(load);
2788       break;
2789     default:
2790       fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
2791       break;
2792     }
2793     // The load node has the control of the preceding MemBarCPUOrder.  All
2794     // following nodes will have the control of the MemBarCPUOrder inserted at
2795     // the end of this method.  So, pushing the load onto the stack at a later
2796     // point is fine.
2797     set_result(load);
2798   } else {
2799     // place effect of store into memory
2800     switch (type) {
2801     case T_DOUBLE:
2802       val = dstore_rounding(val);
2803       break;
2804     case T_ADDRESS:
2805       // Repackage the long as a pointer.
2806       val = ConvL2X(val);
2807       val = _gvn.transform(new (C) CastX2PNode(val));
2808       break;
2809     }
2810 
2811     MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
2812     if (type == T_OBJECT ) {
2813       store = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched);
2814     } else {
2815       store = store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile, unaligned, mismatched);
2816     }
2817   }
2818 
2819   if (is_volatile) {
2820     if (!is_store) {
2821 #if INCLUDE_ALL_GCS
2822       if (UseShenandoahGC) {
2823         load = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(load);
2824       }
2825 #endif
2826       Node* mb = insert_mem_bar(Op_MemBarAcquire, load);
2827       mb->as_MemBar()->set_trailing_load();
2828     } else {
2829       if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2830         Node* mb = insert_mem_bar(Op_MemBarVolatile, store);
2831         MemBarNode::set_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());
2832       }
2833     }
2834   }
2835 
2836   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2837 
2838   return true;
2839 }
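
As a rough analogy only (this is emphatically not the HotSpot implementation), the leading and trailing membars that inline_unsafe_access() emits around a volatile access give it roughly the semantics of a sequentially consistent C++11 atomic:

    #include <atomic>
    #include <cstdint>

    // Analogy only. A volatile Unsafe read is a pinned load fenced so later
    // accesses cannot float above it (acquire + CPU-order barriers); a
    // volatile Unsafe write is preceded by a release barrier and, on
    // non-multi-copy-atomic CPUs, followed by a full barrier. A seq_cst
    // atomic is the closest portable C++ equivalent.
    std::atomic<std::int64_t> g_cell{0};

    std::int64_t volatile_unsafe_get_analogy() {
      return g_cell.load(std::memory_order_seq_cst);   // ~ pinned load + MemBarAcquire
    }

    void volatile_unsafe_put_analogy(std::int64_t v) {
      g_cell.store(v, std::memory_order_seq_cst);      // ~ MemBarRelease + store (+ MemBarVolatile)
    }
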
2840 
2841 //----------------------------inline_unsafe_prefetch----------------------------
2842 
2843 bool LibraryCallKit::inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static) {
2844 #ifndef PRODUCT
2845   {
2846     ResourceMark rm;
2847     // Check the signatures.
2848     ciSignature* sig = callee()->signature();
2849 #ifdef ASSERT
2850     // Object getObject(Object base, int/long offset), etc.
2851     BasicType rtype = sig->return_type()->basic_type();


3011   if (kind == LS_xchg && type == T_OBJECT) {
3012     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
3013     if (tjp != NULL) {
3014       value_type = tjp;
3015     }
3016   }
3017 
3018   // Null check receiver.
3019   receiver = null_check(receiver);
3020   if (stopped()) {
3021     return true;
3022   }
3023 
3024   int alias_idx = C->get_alias_index(adr_type);
3025 
3026   // Memory-model-wise, a LoadStore acts like a little synchronized
3027   // block, so needs barriers on each side.  These don't translate
3028   // into actual barriers on most machines, but we still need rest of
3029   // compiler to respect ordering.
3030 
3031   Node* leading_membar = insert_mem_bar(Op_MemBarRelease);
3032   insert_mem_bar(Op_MemBarCPUOrder);
3033 
3034   // 4984716: MemBars must be inserted before this
3035   //          memory node in order to avoid a false
3036   //          dependency which will confuse the scheduler.
3037   Node *mem = memory(alias_idx);
3038 
3039   // For now, we handle only those cases that actually exist: ints,
3040   // longs, and Object. Adding others should be straightforward.
3041   Node* load_store = NULL;
3042   switch(type) {
3043   case T_INT:
3044     if (kind == LS_xadd) {
3045       load_store = _gvn.transform(new (C) GetAndAddINode(control(), mem, adr, newval, adr_type));
3046     } else if (kind == LS_xchg) {
3047       load_store = _gvn.transform(new (C) GetAndSetINode(control(), mem, adr, newval, adr_type));
3048     } else if (kind == LS_cmpxchg) {
3049       load_store = _gvn.transform(new (C) CompareAndSwapINode(control(), mem, adr, newval, oldval));
3050     } else {
3051       ShouldNotReachHere();


3110       if (kind == LS_xchg) {
3111         load_store = _gvn.transform(new (C) GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
3112       } else {
3113         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
3114         load_store = _gvn.transform(new (C) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
3115       }
3116     }
3117     post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
3118     break;
3119   default:
3120     fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
3121     break;
3122   }
3123 
3124   // SCMemProjNodes represent the memory state of a LoadStore. Their
3125   // main role is to prevent LoadStore nodes from being optimized away
3126   // when their results aren't used.
3127   Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
3128   set_memory(proj, alias_idx);
3129 
3130   Node* access = load_store;
3131 
3132   if (type == T_OBJECT && kind == LS_xchg) {
3133 #ifdef _LP64
3134     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
3135       load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
3136     }
3137 #endif
3138 #if INCLUDE_ALL_GCS
3139   if (UseShenandoahGC) {
3140     load_store = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, load_store);
3141   }
3142 #endif
3143     if (can_move_pre_barrier()) {
3144       // Don't need to load pre_val. The old value is returned by load_store.
3145       // The pre_barrier can execute after the xchg as long as no safepoint
3146       // gets inserted between them.
3147       pre_barrier(false /* do_load */,
3148                   control(), NULL, NULL, max_juint, NULL, NULL,
3149                   load_store /* pre_val */,
3150                   T_OBJECT);
3151     }
3152   }
3153 
3154   // Add the trailing membar surrounding the access
3155   insert_mem_bar(Op_MemBarCPUOrder);
3156   Node* mb = insert_mem_bar(Op_MemBarAcquire, access);
3157   MemBarNode::set_load_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());
3158 
3159   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
3160   set_result(load_store);
3161   return true;
3162 }
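
The three LoadStore kinds dispatched above correspond to Unsafe's getAndAdd, getAndSet and compareAndSwap families. A minimal standalone C++ sketch of their memory-model behaviour (ignoring the oop pre/post barriers, which have no C++ analogue):

    #include <atomic>
    #include <cstdint>

    std::atomic<std::int32_t> g_slot{0};

    // LS_xadd ~ GetAndAddINode: atomically add and return the previous value.
    std::int32_t ls_xadd(std::int32_t delta) {
      return g_slot.fetch_add(delta, std::memory_order_seq_cst);
    }

    // LS_xchg ~ GetAndSetINode: atomically swap in a new value, return the old one.
    std::int32_t ls_xchg(std::int32_t newval) {
      return g_slot.exchange(newval, std::memory_order_seq_cst);
    }

    // LS_cmpxchg ~ CompareAndSwapINode: install newval only if the current
    // value equals oldval; report success.
    bool ls_cmpxchg(std::int32_t oldval, std::int32_t newval) {
      return g_slot.compare_exchange_strong(oldval, newval, std::memory_order_seq_cst);
    }

Like the intrinsic, each of these acts as a full two-sided fence, which is why the code above brackets the LoadStore node with MemBarRelease/MemBarCPUOrder before and MemBarCPUOrder/MemBarAcquire after.
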
3163 
3164 //----------------------------inline_unsafe_ordered_store----------------------
3165 // public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
3166 // public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x);
3167 // public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x);
3168 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
3169   // This is another variant of inline_unsafe_access, differing in
3170   // that it always issues store-store ("release") barrier and ensures
3171   // store-atomicity (which only matters for "long").
3172 
3173   if (callee()->is_static())  return false;  // caller must have the capability!
3174 
3175 #ifndef PRODUCT
3176   {
3177     ResourceMark rm;
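
As a loose analogy (not the implementation), the ordered stores declared above behave like a C++11 release store: prior writes cannot be reordered past them, but unlike a volatile write there is no trailing full fence.

    #include <atomic>
    #include <cstdint>

    std::atomic<std::int64_t> g_ordered{0};

    // Analogy for putOrderedLong: release ordering plus store atomicity,
    // with no trailing StoreLoad barrier.
    void put_ordered_long_analogy(std::int64_t v) {
      g_ordered.store(v, std::memory_order_release);
    }
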


4533   // 16 - 64-bit VM, normal klass
4534   if (base_off % BytesPerLong != 0) {
4535     assert(UseCompressedClassPointers, "");
4536     if (is_array) {
4537       // Exclude length to copy by 8 bytes words.
4538       base_off += sizeof(int);
4539     } else {
4540       // Include klass to copy by 8 bytes words.
4541       base_off = instanceOopDesc::klass_offset_in_bytes();
4542     }
4543     assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
4544   }
4545   src  = basic_plus_adr(src,  base_off);
4546   dest = basic_plus_adr(dest, base_off);
4547 
4548   // Compute the length also, if needed:
4549   Node* countx = size;
4550   countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(base_off)));
4551   countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong) ));
4552 
4553 #if INCLUDE_ALL_GCS
4554   if (UseShenandoahGC && ShenandoahCloneBarrier) {
4555     assert (src->is_AddP(), "for clone the src should be the interior ptr");
4556     assert (dest->is_AddP(), "for clone the dst should be the interior ptr");
4557 
4558     // Make sure that references in the cloned object are updated for Shenandoah.
4559     make_runtime_call(RC_LEAF|RC_NO_FP,
4560                       OptoRuntime::shenandoah_clone_barrier_Type(),
4561                       CAST_FROM_FN_PTR(address, ShenandoahRuntime::shenandoah_clone_barrier),
4562                       "shenandoah_clone_barrier", TypePtr::BOTTOM,
4563                       src->in(AddPNode::Base));
4564   }
4565 #endif
4566 
4567   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4568   bool disjoint_bases = true;
4569   generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
4570                                src, NULL, dest, NULL, countx,
4571                                /*dest_uninitialized*/true);
4572 
4573   // If necessary, emit some card marks afterwards.  (Non-arrays only.)
4574   if (card_mark) {
4575     assert(!is_array, "");
4576     // Put in store barrier for any and all oops we are sticking
4577     // into this object.  (We could avoid this if we could prove
4578     // that the object type contains no oop fields at all.)
4579     Node* no_particular_value = NULL;
4580     Node* no_particular_field = NULL;
4581     int raw_adr_idx = Compile::AliasIdxRaw;
4582     post_barrier(control(),
4583                  memory(raw_adr_type),
4584                  alloc_obj,
4585                  no_particular_field,
4586                  raw_adr_idx,


5283       PreserveJVMState pjvms(this);
5284       set_control(not_subtype_ctrl);
5285       // (At this point we can assume disjoint_bases, since types differ.)
5286       int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
5287       Node* p1 = basic_plus_adr(dest_klass, ek_offset);
5288       Node* n1 = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p1, TypeRawPtr::BOTTOM);
5289       Node* dest_elem_klass = _gvn.transform(n1);
5290       Node* cv = generate_checkcast_arraycopy(adr_type,
5291                                               dest_elem_klass,
5292                                               src, src_offset, dest, dest_offset,
5293                                               ConvI2X(copy_length), dest_uninitialized);
5294       if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
5295       checked_control = control();
5296       checked_i_o     = i_o();
5297       checked_mem     = memory(adr_type);
5298       checked_value   = cv;
5299     }
5300     // At this point we know we do not need type checks on oop stores.
5301 
5302     // Let's see if we need card marks:
5303     if (alloc != NULL && use_ReduceInitialCardMarks() && ! UseShenandoahGC) {
5304       // If we do not need card marks, copy using the jint or jlong stub.
5305       copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
5306       assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
5307              "sizes agree");
5308     }
5309   }
5310 
5311   if (!stopped()) {
5312     // Generate the fast path, if possible.
5313     PreserveJVMState pjvms(this);
5314     generate_unchecked_arraycopy(adr_type, copy_type, disjoint_bases,
5315                                  src, src_offset, dest, dest_offset,
5316                                  ConvI2X(copy_length), dest_uninitialized);
5317 
5318     // Present the results of the fast call.
5319     result_region->init_req(fast_path, control());
5320     result_i_o   ->init_req(fast_path, i_o());
5321     result_memory->init_req(fast_path, memory(adr_type));
5322   }
5323 
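
One detail worth spelling out from the fast-path selection above: when card marks are not needed, the copy runs through a raw jint or jlong stub, and the width depends on how oops are stored. A tiny illustrative helper (not part of the file), assuming the LP64_ONLY/UseCompressedOops meanings from the line above:

    // Illustrative only: with compressed oops on a 64-bit VM the heap holds
    // 32-bit oop values, so the jint stub copies them correctly; otherwise
    // oops are pointer-sized and the jlong stub is used.
    enum RawCopyWidth { CopyAsJint, CopyAsJlong };

    static RawCopyWidth raw_copy_width(bool lp64, bool compressed_oops) {
      if (!lp64) return CopyAsJint;                    // 32-bit VM: oops are 32-bit
      return compressed_oops ? CopyAsJint : CopyAsJlong;
    }
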


6317 }
6318 
6319 //----------------------------inline_reference_get----------------------------
6320 // public T java.lang.ref.Reference.get();
6321 bool LibraryCallKit::inline_reference_get() {
6322   const int referent_offset = java_lang_ref_Reference::referent_offset;
6323   guarantee(referent_offset > 0, "should have already been set");
6324 
6325   // Get the argument:
6326   Node* reference_obj = null_check_receiver();
6327   if (stopped()) return true;
6328 
6329   Node* adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);
6330 
6331   ciInstanceKlass* klass = env()->Object_klass();
6332   const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
6333 
6334   Node* no_ctrl = NULL;
6335   Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT, MemNode::unordered);
6336 
6337 #if INCLUDE_ALL_GCS
6338   if (UseShenandoahGC) {
6339     result = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, result);
6340   }
6341 #endif
6342 
6343   // Use the pre-barrier to record the value in the referent field
6344   pre_barrier(false /* do_load */,
6345               control(),
6346               NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
6347               result /* pre_val */,
6348               T_OBJECT);
6349 
6350   // Add memory barrier to prevent commoning reads from this field
6351   // across safepoint since GC can change its value.
6352   insert_mem_bar(Op_MemBarCPUOrder);
6353 
6354   set_result(result);
6355   return true;
6356 }
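
To summarize the order of operations the intrinsic wires up, here is a standalone conceptual sketch (helper names are invented; in the real code the pre-barrier and membar are IR nodes, not calls):

    #include <atomic>

    struct OopRef { const void* p; };

    static void satb_log(const void* v) { (void)v; }       // stand-in for pre_barrier()

    // Conceptual order only: plain load of the referent, (Shenandoah: a
    // load-reference barrier on the result), SATB pre-barrier fed with the
    // loaded value, then a compiler-ordering barrier so the load is not
    // commoned across a safepoint.
    static OopRef reference_get_sketch(const OopRef* referent_field) {
      OopRef result = *referent_field;                      // make_load(..., unordered)
      if (result.p != nullptr) {
        satb_log(result.p);                                 // pre_barrier(pre_val = result)
      }
      std::atomic_signal_fence(std::memory_order_seq_cst);  // ~ MemBarCPUOrder (compiler-only)
      return result;
    }
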
6357 
6358 
6359 Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
6360                                               bool is_exact=true, bool is_static=false) {
6361 
6362   const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();


6372 
6373   // Next code  copied from Parse::do_get_xxx():
6374 
6375   // Compute address and memory type.
6376   int offset  = field->offset_in_bytes();
6377   bool is_vol = field->is_volatile();
6378   ciType* field_klass = field->type();
6379   assert(field_klass->is_loaded(), "should be loaded");
6380   const TypePtr* adr_type = C->alias_type(field)->adr_type();
6381   Node *adr = basic_plus_adr(fromObj, fromObj, offset);
6382   BasicType bt = field->layout_type();
6383 
6384   // Build the resultant type of the load
6385   const Type *type;
6386   if (bt == T_OBJECT) {
6387     type = TypeOopPtr::make_from_klass(field_klass->as_klass());
6388   } else {
6389     type = Type::get_const_basic_type(bt);
6390   }
6391 
6392   Node* leading_membar = NULL;
6393   if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
6394     leading_membar = insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
6395   }
6396   // Build the load.
6397   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
6398   Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
6399 #if INCLUDE_ALL_GCS
6400   if (UseShenandoahGC && (bt == T_OBJECT || bt == T_ARRAY)) {
6401     loadedField = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, loadedField);
6402   }
6403 #endif
6404 
6405   // If reference is volatile, prevent following memory ops from
6406   // floating up past the volatile read.  Also prevents commoning
6407   // another volatile read.
6408   if (is_vol) {
6409     // Memory barrier includes bogus read of value to force load BEFORE membar
6410     Node* mb = insert_mem_bar(Op_MemBarAcquire, loadedField);
6411     mb->as_MemBar()->set_trailing_load();
6412   }
6413   return loadedField;
6414 }
6415 
6416 
6417 //------------------------------inline_aescrypt_Block-----------------------
6418 bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
6419   address stubAddr = NULL;
6420   const char *stubName;
6421   assert(UseAES, "need AES instruction support");
6422 
6423   switch(id) {
6424   case vmIntrinsics::_aescrypt_encryptBlock:
6425     stubAddr = StubRoutines::aescrypt_encryptBlock();
6426     stubName = "aescrypt_encryptBlock";
6427     break;
6428   case vmIntrinsics::_aescrypt_decryptBlock:
6429     stubAddr = StubRoutines::aescrypt_decryptBlock();
6430     stubName = "aescrypt_decryptBlock";
6431     break;

