src/share/vm/opto/library_call.cpp

  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "classfile/vmSymbols.hpp"
  28 #include "compiler/compileBroker.hpp"
  29 #include "compiler/compileLog.hpp"
  30 #include "jfr/support/jfrIntrinsics.hpp"
  31 #include "oops/objArrayKlass.hpp"
  32 #include "opto/addnode.hpp"
  33 #include "opto/callGenerator.hpp"
  34 #include "opto/cfgnode.hpp"
  35 #include "opto/connode.hpp"
  36 #include "opto/idealKit.hpp"
  37 #include "opto/mathexactnode.hpp"
  38 #include "opto/mulnode.hpp"
  39 #include "opto/parse.hpp"
  40 #include "opto/runtime.hpp"
  41 #include "opto/subnode.hpp"
  42 #include "prims/nativeLookup.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "utilities/macros.hpp"
  45 
  46 class LibraryIntrinsic : public InlineCallGenerator {
  47   // Extend the set of intrinsics known to the runtime:
  48  public:
  49  private:
  50   bool             _is_virtual;
  51   bool             _does_virtual_dispatch;
  52   int8_t           _predicates_count;  // Intrinsic is predicated by several conditions
  53   int8_t           _last_predicate; // Last generated predicate
  54   vmIntrinsics::ID _intrinsic_id;
  55 
  56  public:
  57   LibraryIntrinsic(ciMethod* m, bool is_virtual, int predicates_count, bool does_virtual_dispatch, vmIntrinsics::ID id)
  58     : InlineCallGenerator(m),
  59       _is_virtual(is_virtual),
  60       _does_virtual_dispatch(does_virtual_dispatch),
  61       _predicates_count((int8_t)predicates_count),
  62       _last_predicate((int8_t)-1),
  63       _intrinsic_id(id)
  64   {


2413   case vmIntrinsics::_reverseBytes_l:           n = new (C) ReverseBytesLNode( 0,   arg);  break;
2414   default:  fatal_unexpected_iid(id);  break;
2415   }
2416   set_result(_gvn.transform(n));
2417   return true;
2418 }
2419 
2420 //----------------------------inline_unsafe_access----------------------------
2421 
2422 const static BasicType T_ADDRESS_HOLDER = T_LONG;
2423 
2424 // Helper that guards and inserts a pre-barrier.
2425 void LibraryCallKit::insert_pre_barrier(Node* base_oop, Node* offset,
2426                                         Node* pre_val, bool need_mem_bar) {
2427   // We could be accessing the referent field of a reference object. If so, when G1
2428   // is enabled, we need to log the value in the referent field in an SATB buffer.
2429   // This routine applies some compile-time filters and generates suitable
2430   // runtime filters that guard the pre-barrier code.
2431   // Also add a memory barrier for non-volatile loads from the referent field
2432   // to prevent commoning of loads across safepoints.
2433   if (!UseG1GC && !need_mem_bar)
2434     return;
2435 
2436   // Some compile time checks.
2437 
2438   // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
2439   const TypeX* otype = offset->find_intptr_t_type();
2440   if (otype != NULL && otype->is_con() &&
2441       otype->get_con() != java_lang_ref_Reference::referent_offset) {
2442     // Constant offset but not the reference_offset so just return
2443     return;
2444   }
2445 
2446   // We only need to generate the runtime guards for instances.
2447   const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
2448   if (btype != NULL) {
2449     if (btype->isa_aryptr()) {
2450       // Array type so nothing to do
2451       return;
2452     }
2453 


2670     }
2671     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2672       // Don't intrinsify mismatched object accesses
2673       return false;
2674     }
2675     mismatched = (bt != type);
2676   }
2677 
2678   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2679 
2680   // First guess at the value type.
2681   const Type *value_type = Type::get_const_basic_type(type);
2682 
2683   // We will need memory barriers unless we can determine a unique
2684   // alias category for this reference.  (Note:  If for some reason
2685   // the barriers get omitted and the unsafe reference begins to "pollute"
2686   // the alias analysis of the rest of the graph, either Compile::can_alias
2687   // or Compile::must_alias will throw a diagnostic assert.)
2688   bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
2689 
2690   // If we are reading the value of the referent field of a Reference
2691   // object (either by using Unsafe directly or through reflection)
2692   // then, if G1 is enabled, we need to record the referent in an
2693   // SATB log buffer using the pre-barrier mechanism.
2694   // We also need to add a memory barrier to prevent commoning reads
2695   // from this field across safepoints, since the GC can change its value.
2696   bool need_read_barrier = !is_native_ptr && !is_store &&
2697                            offset != top() && heap_base_oop != top();
2698 
2699   if (!is_store && type == T_OBJECT) {
2700     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type, is_native_ptr);
2701     if (tjp != NULL) {
2702       value_type = tjp;
2703     }
2704   }
2705 
2706   receiver = null_check(receiver);
2707   if (stopped()) {
2708     return true;
2709   }
2710   // Heap pointers get a null-check from the interpreter,
2711   // as a courtesy.  However, this is not guaranteed by Unsafe,
2712   // and it is not possible to fully distinguish unintended nulls
2713   // from intended ones in this API.
2714 
2715   if (is_volatile) {
2716     // We need to emit leading and trailing CPU membars (see below) in
2717     // addition to memory membars when is_volatile. This is a little
2718     // too strong, but avoids the need to insert per-alias-type
2719     // volatile membars (for stores; compare Parse::do_put_xxx), which
2720     // we cannot do effectively here because we probably only have a
2721     // rough approximation of type.
2722     need_mem_bar = true;
2723     // For Stores, place a memory ordering barrier now.
2724     if (is_store) {
2725       insert_mem_bar(Op_MemBarRelease);
2726     } else {
2727       if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2728         insert_mem_bar(Op_MemBarVolatile);
2729       }
2730     }
2731   }
2732 
2733   // Memory barrier to prevent normal and 'unsafe' accesses from
2734   // bypassing each other.  Happens after null checks, so the
2735   // exception paths do not take memory state from the memory barrier,
2736   // so there is no problem making a strong assert about mixing users
2737   // of safe & unsafe memory.  Otherwise fails in a CTW of rt.jar
2738   // around 5701, class sun/reflect/UnsafeBooleanFieldAccessorImpl.
2739   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2740 
2741   if (!is_store) {
2742     MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
2743     // To be valid, unsafe loads may depend on conditions other than
2744     // the one that guards them: pin the Load node
2745     Node* p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched);
2746     // load value
2747     switch (type) {
2748     case T_BOOLEAN:
2749     case T_CHAR:
2750     case T_BYTE:
2751     case T_SHORT:
2752     case T_INT:
2753     case T_LONG:
2754     case T_FLOAT:
2755     case T_DOUBLE:
2756       break;
2757     case T_OBJECT:
2758       if (need_read_barrier) {
2759         insert_pre_barrier(heap_base_oop, offset, p, !(is_volatile || need_mem_bar));
2760       }
2761       break;
2762     case T_ADDRESS:
2763       // Cast to an int type.
2764       p = _gvn.transform(new (C) CastP2XNode(NULL, p));
2765       p = ConvX2UL(p);
2766       break;
2767     default:
2768       fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
2769       break;
2770     }
2771     // The load node has the control of the preceding MemBarCPUOrder.  All
2772     // following nodes will have the control of the MemBarCPUOrder inserted at
2773     // the end of this method.  So, pushing the load onto the stack at a later
2774     // point is fine.
2775     set_result(p);
2776   } else {
2777     // place effect of store into memory
2778     switch (type) {
2779     case T_DOUBLE:
2780       val = dstore_rounding(val);
2781       break;
2782     case T_ADDRESS:
2783       // Repackage the long as a pointer.
2784       val = ConvL2X(val);
2785       val = _gvn.transform(new (C) CastX2PNode(val));
2786       break;
2787     }
2788 
2789     MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
2790     if (type == T_OBJECT ) {
2791       (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched);
2792     } else {
2793       (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile, unaligned, mismatched);
2794     }
2795   }
2796 
2797   if (is_volatile) {
2798     if (!is_store) {
2799       insert_mem_bar(Op_MemBarAcquire);
2800     } else {
2801       if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2802         insert_mem_bar(Op_MemBarVolatile);

2803       }
2804     }
2805   }
2806 
2807   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2808 
2809   return true;
2810 }
2811 
2812 //----------------------------inline_unsafe_prefetch----------------------------
2813 
2814 bool LibraryCallKit::inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static) {
2815 #ifndef PRODUCT
2816   {
2817     ResourceMark rm;
2818     // Check the signatures.
2819     ciSignature* sig = callee()->signature();
2820 #ifdef ASSERT
2821     // Object getObject(Object base, int/long offset), etc.
2822     BasicType rtype = sig->return_type()->basic_type();


2982   if (kind == LS_xchg && type == T_OBJECT) {
2983     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2984     if (tjp != NULL) {
2985       value_type = tjp;
2986     }
2987   }
2988 
2989   // Null check receiver.
2990   receiver = null_check(receiver);
2991   if (stopped()) {
2992     return true;
2993   }
2994 
2995   int alias_idx = C->get_alias_index(adr_type);
2996 
2997   // Memory-model-wise, a LoadStore acts like a little synchronized
2998   // block, so needs barriers on each side.  These don't translate
2999   // into actual barriers on most machines, but we still need the rest of
3000   // the compiler to respect ordering.
3001 
3002   insert_mem_bar(Op_MemBarRelease);
3003   insert_mem_bar(Op_MemBarCPUOrder);
3004 
3005   // 4984716: MemBars must be inserted before this
3006   //          memory node in order to avoid a false
3007   //          dependency which will confuse the scheduler.
3008   Node *mem = memory(alias_idx);
3009 
3010   // For now, we handle only those cases that actually exist: ints,
3011   // longs, and Object. Adding others should be straightforward.
3012   Node* load_store = NULL;
3013   switch(type) {
3014   case T_INT:
3015     if (kind == LS_xadd) {
3016       load_store = _gvn.transform(new (C) GetAndAddINode(control(), mem, adr, newval, adr_type));
3017     } else if (kind == LS_xchg) {
3018       load_store = _gvn.transform(new (C) GetAndSetINode(control(), mem, adr, newval, adr_type));
3019     } else if (kind == LS_cmpxchg) {
3020       load_store = _gvn.transform(new (C) CompareAndSwapINode(control(), mem, adr, newval, oldval));
3021     } else {
3022       ShouldNotReachHere();


3081       if (kind == LS_xchg) {
3082         load_store = _gvn.transform(new (C) GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
3083       } else {
3084         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
3085         load_store = _gvn.transform(new (C) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
3086       }
3087     }
3088     post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
3089     break;
3090   default:
3091     fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
3092     break;
3093   }
3094 
3095   // SCMemProjNodes represent the memory state of a LoadStore. Their
3096   // main role is to prevent LoadStore nodes from being optimized away
3097   // when their results aren't used.
3098   Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
3099   set_memory(proj, alias_idx);
3100 
3101   if (type == T_OBJECT && kind == LS_xchg) {
3102 #ifdef _LP64
3103     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
3104       load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
3105     }
3106 #endif
3107     if (can_move_pre_barrier()) {
3108       // Don't need to load pre_val. The old value is returned by load_store.
3109       // The pre_barrier can execute after the xchg as long as no safepoint
3110       // gets inserted between them.
3111       pre_barrier(false /* do_load */,
3112                   control(), NULL, NULL, max_juint, NULL, NULL,
3113                   load_store /* pre_val */,
3114                   T_OBJECT);
3115     }
3116   }
3117 
3118   // Add the trailing membar surrounding the access
3119   insert_mem_bar(Op_MemBarCPUOrder);
3120   insert_mem_bar(Op_MemBarAcquire);

3121 
3122   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
3123   set_result(load_store);
3124   return true;
3125 }
3126 
3127 //----------------------------inline_unsafe_ordered_store----------------------
3128 // public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
3129 // public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x);
3130 // public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x);
3131 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
3132   // This is another variant of inline_unsafe_access, differing in
3133   // that it always issues a store-store ("release") barrier and ensures
3134   // store-atomicity (which only matters for "long").
3135 
3136   if (callee()->is_static())  return false;  // caller must have the capability!
3137 
3138 #ifndef PRODUCT
3139   {
3140     ResourceMark rm;


4521   // 16 - 64-bit VM, normal klass
4522   if (base_off % BytesPerLong != 0) {
4523     assert(UseCompressedClassPointers, "");
4524     if (is_array) {
4525       // Exclude the length field so the copy is done in 8-byte words.
4526       base_off += sizeof(int);
4527     } else {
4528       // Include the klass so the copy is done in 8-byte words.
4529       base_off = instanceOopDesc::klass_offset_in_bytes();
4530     }
4531     assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
4532   }
4533   src  = basic_plus_adr(src,  base_off);
4534   dest = basic_plus_adr(dest, base_off);
4535 
4536   // Compute the length also, if needed:
4537   Node* countx = size;
4538   countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(base_off)));
4539   countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong) ));
4540 
4541   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4542   bool disjoint_bases = true;
4543   generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
4544                                src, NULL, dest, NULL, countx,
4545                                /*dest_uninitialized*/true);
4546 
4547   // If necessary, emit some card marks afterwards.  (Non-arrays only.)
4548   if (card_mark) {
4549     assert(!is_array, "");
4550     // Put in store barrier for any and all oops we are sticking
4551     // into this object.  (We could avoid this if we could prove
4552     // that the object type contains no oop fields at all.)
4553     Node* no_particular_value = NULL;
4554     Node* no_particular_field = NULL;
4555     int raw_adr_idx = Compile::AliasIdxRaw;
4556     post_barrier(control(),
4557                  memory(raw_adr_type),
4558                  alloc_obj,
4559                  no_particular_field,
4560                  raw_adr_idx,


5257       PreserveJVMState pjvms(this);
5258       set_control(not_subtype_ctrl);
5259       // (At this point we can assume disjoint_bases, since types differ.)
5260       int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
5261       Node* p1 = basic_plus_adr(dest_klass, ek_offset);
5262       Node* n1 = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p1, TypeRawPtr::BOTTOM);
5263       Node* dest_elem_klass = _gvn.transform(n1);
5264       Node* cv = generate_checkcast_arraycopy(adr_type,
5265                                               dest_elem_klass,
5266                                               src, src_offset, dest, dest_offset,
5267                                               ConvI2X(copy_length), dest_uninitialized);
5268       if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
5269       checked_control = control();
5270       checked_i_o     = i_o();
5271       checked_mem     = memory(adr_type);
5272       checked_value   = cv;
5273     }
5274     // At this point we know we do not need type checks on oop stores.
5275 
5276     // Let's see if we need card marks:
5277     if (alloc != NULL && use_ReduceInitialCardMarks()) {
5278       // If we do not need card marks, copy using the jint or jlong stub.
5279       copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
5280       assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
5281              "sizes agree");
5282     }
5283   }
5284 
5285   if (!stopped()) {
5286     // Generate the fast path, if possible.
5287     PreserveJVMState pjvms(this);
5288     generate_unchecked_arraycopy(adr_type, copy_type, disjoint_bases,
5289                                  src, src_offset, dest, dest_offset,
5290                                  ConvI2X(copy_length), dest_uninitialized);
5291 
5292     // Present the results of the fast call.
5293     result_region->init_req(fast_path, control());
5294     result_i_o   ->init_req(fast_path, i_o());
5295     result_memory->init_req(fast_path, memory(adr_type));
5296   }
5297 


6291 }
6292 
6293 //----------------------------inline_reference_get----------------------------
6294 // public T java.lang.ref.Reference.get();
6295 bool LibraryCallKit::inline_reference_get() {
6296   const int referent_offset = java_lang_ref_Reference::referent_offset;
6297   guarantee(referent_offset > 0, "should have already been set");
6298 
6299   // Get the argument:
6300   Node* reference_obj = null_check_receiver();
6301   if (stopped()) return true;
6302 
6303   Node* adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);
6304 
6305   ciInstanceKlass* klass = env()->Object_klass();
6306   const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
6307 
6308   Node* no_ctrl = NULL;
6309   Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT, MemNode::unordered);
6310 
6311   // Use the pre-barrier to record the value in the referent field
6312   pre_barrier(false /* do_load */,
6313               control(),
6314               NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
6315               result /* pre_val */,
6316               T_OBJECT);
6317 
6318   // Add a memory barrier to prevent commoning reads from this field
6319   // across safepoints, since the GC can change its value.
6320   insert_mem_bar(Op_MemBarCPUOrder);
6321 
6322   set_result(result);
6323   return true;
6324 }
6325 
6326 
6327 Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
6328                                               bool is_exact=true, bool is_static=false) {
6329 
6330   const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();


6340 
6341   // The following code is copied from Parse::do_get_xxx():
6342 
6343   // Compute address and memory type.
6344   int offset  = field->offset_in_bytes();
6345   bool is_vol = field->is_volatile();
6346   ciType* field_klass = field->type();
6347   assert(field_klass->is_loaded(), "should be loaded");
6348   const TypePtr* adr_type = C->alias_type(field)->adr_type();
6349   Node *adr = basic_plus_adr(fromObj, fromObj, offset);
6350   BasicType bt = field->layout_type();
6351 
6352   // Build the resultant type of the load
6353   const Type *type;
6354   if (bt == T_OBJECT) {
6355     type = TypeOopPtr::make_from_klass(field_klass->as_klass());
6356   } else {
6357     type = Type::get_const_basic_type(bt);
6358   }
6359 

6360   if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
6361     insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
6362   }
6363   // Build the load.
6364   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
6365   Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
6366   // If reference is volatile, prevent following memory ops from
6367   // floating up past the volatile read.  Also prevents commoning
6368   // another volatile read.
6369   if (is_vol) {
6370     // Memory barrier includes bogus read of value to force load BEFORE membar
6371     insert_mem_bar(Op_MemBarAcquire, loadedField);

6372   }
6373   return loadedField;
6374 }
6375 
6376 
6377 //------------------------------inline_aescrypt_Block-----------------------
6378 bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
6379   address stubAddr = NULL;
6380   const char *stubName;
6381   assert(UseAES, "need AES instruction support");
6382 
6383   switch(id) {
6384   case vmIntrinsics::_aescrypt_encryptBlock:
6385     stubAddr = StubRoutines::aescrypt_encryptBlock();
6386     stubName = "aescrypt_encryptBlock";
6387     break;
6388   case vmIntrinsics::_aescrypt_decryptBlock:
6389     stubAddr = StubRoutines::aescrypt_decryptBlock();
6390     stubName = "aescrypt_decryptBlock";
6391     break;




  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "classfile/vmSymbols.hpp"
  28 #include "compiler/compileBroker.hpp"
  29 #include "compiler/compileLog.hpp"
  30 #include "jfr/support/jfrIntrinsics.hpp"
  31 #include "oops/objArrayKlass.hpp"
  32 #include "opto/addnode.hpp"
  33 #include "opto/callGenerator.hpp"
  34 #include "opto/cfgnode.hpp"
  35 #include "opto/connode.hpp"
  36 #include "opto/idealKit.hpp"
  37 #include "opto/mathexactnode.hpp"
  38 #include "opto/mulnode.hpp"
  39 #include "opto/parse.hpp"
  40 #include "opto/runtime.hpp"
  41 #include "opto/subnode.hpp"
  42 #include "prims/nativeLookup.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "utilities/macros.hpp"
  45 #if INCLUDE_ALL_GCS
  46 #include "gc_implementation/shenandoah/shenandoahRuntime.hpp"
  47 #include "gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  48 #include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp"
  49 #endif
  50 
  51 class LibraryIntrinsic : public InlineCallGenerator {
  52   // Extend the set of intrinsics known to the runtime:
  53  public:
  54  private:
  55   bool             _is_virtual;
  56   bool             _does_virtual_dispatch;
  57   int8_t           _predicates_count;  // Intrinsic is predicated by several conditions
  58   int8_t           _last_predicate; // Last generated predicate
  59   vmIntrinsics::ID _intrinsic_id;
  60 
  61  public:
  62   LibraryIntrinsic(ciMethod* m, bool is_virtual, int predicates_count, bool does_virtual_dispatch, vmIntrinsics::ID id)
  63     : InlineCallGenerator(m),
  64       _is_virtual(is_virtual),
  65       _does_virtual_dispatch(does_virtual_dispatch),
  66       _predicates_count((int8_t)predicates_count),
  67       _last_predicate((int8_t)-1),
  68       _intrinsic_id(id)
  69   {


2418   case vmIntrinsics::_reverseBytes_l:           n = new (C) ReverseBytesLNode( 0,   arg);  break;
2419   default:  fatal_unexpected_iid(id);  break;
2420   }
2421   set_result(_gvn.transform(n));
2422   return true;
2423 }
2424 
2425 //----------------------------inline_unsafe_access----------------------------
2426 
2427 const static BasicType T_ADDRESS_HOLDER = T_LONG;
2428 
2429 // Helper that guards and inserts a pre-barrier.
2430 void LibraryCallKit::insert_pre_barrier(Node* base_oop, Node* offset,
2431                                         Node* pre_val, bool need_mem_bar) {
2432   // We could be accessing the referent field of a reference object. If so, when G1
2433   // is enabled, we need to log the value in the referent field in an SATB buffer.
2434   // This routine applies some compile-time filters and generates suitable
2435   // runtime filters that guard the pre-barrier code.
2436   // Also add a memory barrier for non-volatile loads from the referent field
2437   // to prevent commoning of loads across safepoints.
2438   if (!(UseG1GC || UseShenandoahGC) && !need_mem_bar)
2439     return;
2440 
2441   // Some compile time checks.
2442 
2443   // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
2444   const TypeX* otype = offset->find_intptr_t_type();
2445   if (otype != NULL && otype->is_con() &&
2446       otype->get_con() != java_lang_ref_Reference::referent_offset) {
2447     // Constant offset but not the reference_offset so just return
2448     return;
2449   }
2450 
2451   // We only need to generate the runtime guards for instances.
2452   const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
2453   if (btype != NULL) {
2454     if (btype->isa_aryptr()) {
2455       // Array type so nothing to do
2456       return;
2457     }
2458 


2675     }
2676     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2677       // Don't intrinsify mismatched object accesses
2678       return false;
2679     }
2680     mismatched = (bt != type);
2681   }
2682 
2683   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2684 
2685   // First guess at the value type.
2686   const Type *value_type = Type::get_const_basic_type(type);
2687 
2688   // We will need memory barriers unless we can determine a unique
2689   // alias category for this reference.  (Note:  If for some reason
2690   // the barriers get omitted and the unsafe reference begins to "pollute"
2691   // the alias analysis of the rest of the graph, either Compile::can_alias
2692   // or Compile::must_alias will throw a diagnostic assert.)
2693   bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
2694 
2695 #if INCLUDE_ALL_GCS
2696   // Work around the JDK-8220714 bug. This is done for Shenandoah only, until
2697   // the shared code fix is upstreamed and properly tested there.
2698   if (UseShenandoahGC) {
2699     need_mem_bar |= is_native_ptr;
2700   }
2701 #endif
2702 
2703   // If we are reading the value of the referent field of a Reference
2704   // object (either by using Unsafe directly or through reflection)
2705   // then, if G1 is enabled, we need to record the referent in an
2706   // SATB log buffer using the pre-barrier mechanism.
2707   // We also need to add a memory barrier to prevent commoning reads
2708   // from this field across safepoints, since the GC can change its value.
2709   bool need_read_barrier = !is_native_ptr && !is_store &&
2710                            offset != top() && heap_base_oop != top();
2711 
2712   if (!is_store && type == T_OBJECT) {
2713     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type, is_native_ptr);
2714     if (tjp != NULL) {
2715       value_type = tjp;
2716     }
2717   }
2718 
2719   receiver = null_check(receiver);
2720   if (stopped()) {
2721     return true;
2722   }
2723   // Heap pointers get a null-check from the interpreter,
2724   // as a courtesy.  However, this is not guaranteed by Unsafe,
2725   // and it is not possible to fully distinguish unintended nulls
2726   // from intended ones in this API.
2727 
2728   Node* load = NULL;
2729   Node* store = NULL;
2730   Node* leading_membar = NULL;
2731   if (is_volatile) {
2732     // We need to emit leading and trailing CPU membars (see below) in
2733     // addition to memory membars when is_volatile. This is a little
2734     // too strong, but avoids the need to insert per-alias-type
2735     // volatile membars (for stores; compare Parse::do_put_xxx), which
2736     // we cannot do effectively here because we probably only have a
2737     // rough approximation of type.
2738     need_mem_bar = true;
2739     // For Stores, place a memory ordering barrier now.
2740     if (is_store) {
2741       leading_membar = insert_mem_bar(Op_MemBarRelease);
2742     } else {
2743       if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2744         leading_membar = insert_mem_bar(Op_MemBarVolatile);
2745       }
2746     }
2747   }
2748 
2749   // Memory barrier to prevent normal and 'unsafe' accesses from
2750   // bypassing each other.  Happens after null checks, so the
2751   // exception paths do not take memory state from the memory barrier,
2752   // so there is no problem making a strong assert about mixing users
2753   // of safe & unsafe memory.  Otherwise fails in a CTW of rt.jar
2754   // around 5701, class sun/reflect/UnsafeBooleanFieldAccessorImpl.
2755   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2756 
2757   if (!is_store) {
2758     MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
2759     // To be valid, unsafe loads may depend on conditions other than
2760     // the one that guards them: pin the Load node
2761     load = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched);
2762 #if INCLUDE_ALL_GCS
2763     if (UseShenandoahGC && (type == T_OBJECT || type == T_ARRAY)) {
2764       load = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, load);
2765     }
2766 #endif
2767     // load value
2768     switch (type) {
2769     case T_BOOLEAN:
2770     case T_CHAR:
2771     case T_BYTE:
2772     case T_SHORT:
2773     case T_INT:
2774     case T_LONG:
2775     case T_FLOAT:
2776     case T_DOUBLE:
2777       break;
2778     case T_OBJECT:
2779       if (need_read_barrier) {
2780         insert_pre_barrier(heap_base_oop, offset, load, !(is_volatile || need_mem_bar));
2781       }
2782       break;
2783     case T_ADDRESS:
2784       // Cast to an int type.
2785       load = _gvn.transform(new (C) CastP2XNode(NULL, load));
2786       load = ConvX2UL(load);
2787       break;
2788     default:
2789       fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
2790       break;
2791     }
2792     // The load node has the control of the preceding MemBarCPUOrder.  All
2793     // following nodes will have the control of the MemBarCPUOrder inserted at
2794     // the end of this method.  So, pushing the load onto the stack at a later
2795     // point is fine.
2796     set_result(load);
2797   } else {
2798     // place effect of store into memory
2799     switch (type) {
2800     case T_DOUBLE:
2801       val = dstore_rounding(val);
2802       break;
2803     case T_ADDRESS:
2804       // Repackage the long as a pointer.
2805       val = ConvL2X(val);
2806       val = _gvn.transform(new (C) CastX2PNode(val));
2807       break;
2808     }
2809 
2810     MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
2811     if (type == T_OBJECT ) {
2812       store = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched);
2813     } else {
2814       store = store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile, unaligned, mismatched);
2815     }
2816   }
2817 
2818   if (is_volatile) {
2819     if (!is_store) {
2820 #if INCLUDE_ALL_GCS
2821       if (UseShenandoahGC) {
2822         load = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(load);
2823       }
2824 #endif
2825       Node* mb = insert_mem_bar(Op_MemBarAcquire, load);
2826       mb->as_MemBar()->set_trailing_load();
2827     } else {
2828       if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2829         Node* mb = insert_mem_bar(Op_MemBarVolatile, store);
2830         MemBarNode::set_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());
2831       }
2832     }
2833   }
2834 
2835   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2836 
2837   return true;
2838 }
2839 
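A quick gloss on the membar bracketing that inline_unsafe_access() emits above: a volatile Unsafe store gets a leading MemBarRelease and (on CPUs without multi-copy atomicity) a trailing MemBarVolatile, a volatile Unsafe load gets a trailing MemBarAcquire, and the patch now pairs each trailing barrier with its leading counterpart (set_trailing_load / MemBarNode::set_store_pair). At the language level this corresponds to release/acquire publication. The following standalone sketch is only an analogy in std::atomic terms, not HotSpot code, and it deliberately omits the extra StoreLoad ordering that full volatile (and IRIW) semantics require.

// Standalone analogy (not HotSpot code): the ordering that the intrinsic's
// membars provide for a volatile Unsafe field, expressed with std::atomic.
#include <atomic>
#include <cassert>
#include <thread>

struct Box {
  int payload = 0;                 // plain field written before the "volatile" store
  std::atomic<bool> ready{false};  // stands in for the volatile field
};

int main() {
  Box b;
  std::thread writer([&] {
    b.payload = 42;
    // release store: like the leading MemBarRelease before the unsafe store
    b.ready.store(true, std::memory_order_release);
  });
  std::thread reader([&] {
    // acquire load: like the trailing MemBarAcquire after the unsafe load
    while (!b.ready.load(std::memory_order_acquire)) { /* spin */ }
    assert(b.payload == 42);       // guaranteed visible after the acquiring load
  });
  writer.join();
  reader.join();
  return 0;
}
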
2840 //----------------------------inline_unsafe_prefetch----------------------------
2841 
2842 bool LibraryCallKit::inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static) {
2843 #ifndef PRODUCT
2844   {
2845     ResourceMark rm;
2846     // Check the signatures.
2847     ciSignature* sig = callee()->signature();
2848 #ifdef ASSERT
2849     // Object getObject(Object base, int/long offset), etc.
2850     BasicType rtype = sig->return_type()->basic_type();


3010   if (kind == LS_xchg && type == T_OBJECT) {
3011     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
3012     if (tjp != NULL) {
3013       value_type = tjp;
3014     }
3015   }
3016 
3017   // Null check receiver.
3018   receiver = null_check(receiver);
3019   if (stopped()) {
3020     return true;
3021   }
3022 
3023   int alias_idx = C->get_alias_index(adr_type);
3024 
3025   // Memory-model-wise, a LoadStore acts like a little synchronized
3026   // block, so needs barriers on each side.  These don't translate
3027   // into actual barriers on most machines, but we still need the rest of
3028   // the compiler to respect ordering.
3029 
3030   Node* leading_membar = insert_mem_bar(Op_MemBarRelease);
3031   insert_mem_bar(Op_MemBarCPUOrder);
3032 
3033   // 4984716: MemBars must be inserted before this
3034   //          memory node in order to avoid a false
3035   //          dependency which will confuse the scheduler.
3036   Node *mem = memory(alias_idx);
3037 
3038   // For now, we handle only those cases that actually exist: ints,
3039   // longs, and Object. Adding others should be straightforward.
3040   Node* load_store = NULL;
3041   switch(type) {
3042   case T_INT:
3043     if (kind == LS_xadd) {
3044       load_store = _gvn.transform(new (C) GetAndAddINode(control(), mem, adr, newval, adr_type));
3045     } else if (kind == LS_xchg) {
3046       load_store = _gvn.transform(new (C) GetAndSetINode(control(), mem, adr, newval, adr_type));
3047     } else if (kind == LS_cmpxchg) {
3048       load_store = _gvn.transform(new (C) CompareAndSwapINode(control(), mem, adr, newval, oldval));
3049     } else {
3050       ShouldNotReachHere();


3109       if (kind == LS_xchg) {
3110         load_store = _gvn.transform(new (C) GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
3111       } else {
3112         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
3113         load_store = _gvn.transform(new (C) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
3114       }
3115     }
3116     post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
3117     break;
3118   default:
3119     fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
3120     break;
3121   }
3122 
3123   // SCMemProjNodes represent the memory state of a LoadStore. Their
3124   // main role is to prevent LoadStore nodes from being optimized away
3125   // when their results aren't used.
3126   Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
3127   set_memory(proj, alias_idx);
3128 
3129   Node* access = load_store;
3130 
3131   if (type == T_OBJECT && kind == LS_xchg) {
3132 #ifdef _LP64
3133     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
3134       load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
3135     }
3136 #endif
3137 #if INCLUDE_ALL_GCS
3138     if (UseShenandoahGC) {
3139       load_store = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, load_store);
3140     }
3141 #endif
3142     if (can_move_pre_barrier()) {
3143       // Don't need to load pre_val. The old value is returned by load_store.
3144       // The pre_barrier can execute after the xchg as long as no safepoint
3145       // gets inserted between them.
3146       pre_barrier(false /* do_load */,
3147                   control(), NULL, NULL, max_juint, NULL, NULL,
3148                   load_store /* pre_val */,
3149                   T_OBJECT);
3150     }
3151   }
3152 
3153   // Add the trailing membar surrounding the access
3154   insert_mem_bar(Op_MemBarCPUOrder);
3155   Node* mb = insert_mem_bar(Op_MemBarAcquire, access);
3156   MemBarNode::set_load_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());
3157 
3158   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
3159   set_result(load_store);
3160   return true;
3161 }
3162 
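As the comment above says, a LoadStore node behaves like a little synchronized block: the access is bracketed by a leading MemBarRelease/MemBarCPUOrder and a trailing MemBarCPUOrder/MemBarAcquire, which the patch now pairs via MemBarNode::set_load_store_pair. In other words, compareAndSwap, getAndSet and getAndAdd behave as acquire-release read-modify-write operations. A minimal standalone analogy in std::atomic terms, not HotSpot code:

// Standalone analogy (not HotSpot code): the three LoadStore kinds handled
// above map onto acq_rel read-modify-write operations on an atomic cell.
#include <atomic>
#include <cstdio>

int main() {
  std::atomic<int> cell{0};

  // LS_cmpxchg: CompareAndSwapINode
  int expected = 0;
  bool swapped = cell.compare_exchange_strong(expected, 1, std::memory_order_acq_rel);

  // LS_xchg: GetAndSetINode (returns the previous value)
  int prev = cell.exchange(2, std::memory_order_acq_rel);

  // LS_xadd: GetAndAddINode (returns the value before the add)
  int before_add = cell.fetch_add(3, std::memory_order_acq_rel);

  std::printf("swapped=%d prev=%d before_add=%d final=%d\n",
              (int)swapped, prev, before_add, cell.load());
  return 0;
}
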
3163 //----------------------------inline_unsafe_ordered_store----------------------
3164 // public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
3165 // public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x);
3166 // public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x);
3167 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
3168   // This is another variant of inline_unsafe_access, differing in
3169   // that it always issues a store-store ("release") barrier and ensures
3170   // store-atomicity (which only matters for "long").
3171 
3172   if (callee()->is_static())  return false;  // caller must have the capability!
3173 
3174 #ifndef PRODUCT
3175   {
3176     ResourceMark rm;


4557   // 16 - 64-bit VM, normal klass
4558   if (base_off % BytesPerLong != 0) {
4559     assert(UseCompressedClassPointers, "");
4560     if (is_array) {
4561       // Exclude the length field so the copy is done in 8-byte words.
4562       base_off += sizeof(int);
4563     } else {
4564       // Include the klass so the copy is done in 8-byte words.
4565       base_off = instanceOopDesc::klass_offset_in_bytes();
4566     }
4567     assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
4568   }
4569   src  = basic_plus_adr(src,  base_off);
4570   dest = basic_plus_adr(dest, base_off);
4571 
4572   // Compute the length also, if needed:
4573   Node* countx = size;
4574   countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(base_off)));
4575   countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong) ));
4576 
4577 #if INCLUDE_ALL_GCS
4578   if (UseShenandoahGC && ShenandoahCloneBarrier) {
4579     assert (src->is_AddP(), "for clone the src should be the interior ptr");
4580     assert (dest->is_AddP(), "for clone the dst should be the interior ptr");
4581 
4582     // Make sure that references in the cloned object are updated for Shenandoah.
4583     make_runtime_call(RC_LEAF|RC_NO_FP,
4584                       OptoRuntime::shenandoah_clone_barrier_Type(),
4585                       CAST_FROM_FN_PTR(address, ShenandoahRuntime::shenandoah_clone_barrier),
4586                       "shenandoah_clone_barrier", TypePtr::BOTTOM,
4587                       src->in(AddPNode::Base));
4588   }
4589 #endif
4590 
4591   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4592   bool disjoint_bases = true;
4593   generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
4594                                src, NULL, dest, NULL, countx,
4595                                /*dest_uninitialized*/true);
4596 
4597   // If necessary, emit some card marks afterwards.  (Non-arrays only.)
4598   if (card_mark) {
4599     assert(!is_array, "");
4600     // Put in store barrier for any and all oops we are sticking
4601     // into this object.  (We could avoid this if we could prove
4602     // that the object type contains no oop fields at all.)
4603     Node* no_particular_value = NULL;
4604     Node* no_particular_field = NULL;
4605     int raw_adr_idx = Compile::AliasIdxRaw;
4606     post_barrier(control(),
4607                  memory(raw_adr_type),
4608                  alloc_obj,
4609                  no_particular_field,
4610                  raw_adr_idx,

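The base_off / countx arithmetic in the clone hunk above is easy to check by hand: the copy start is bumped to an 8-byte-aligned offset (past the array length field, or back to the klass offset for instances), and the remaining size is turned into a count of 8-byte words. Below is a standalone sketch of that computation; the header offsets used (8-byte mark word, 4-byte narrow klass, 4-byte array length) are illustrative assumptions, not values read from this VM.

// Standalone sketch (illustrative offsets, not read from a live VM) of the
// base_off / countx computation used by the clone intrinsic above.
#include <cassert>
#include <cstdio>

const int BytesPerLong    = 8;
const int LogBytesPerLong = 3;

// Number of 8-byte words generate_unchecked_arraycopy() is asked to copy.
long words_to_copy(long size_in_bytes, long base_off) {
  assert(base_off % BytesPerLong == 0);   // mirrors the alignment assert above
  return (size_in_bytes - base_off) >> LogBytesPerLong;
}

int main() {
  // Assumed layout with compressed class pointers: 8-byte mark word at 0,
  // 4-byte narrow klass at 8, so instance fields start at 12 and an array's
  // 4-byte length field sits at 12 with elements following at 16.
  long instance_base = 8;                  // "include the klass": copy from the klass offset
  long array_base    = 12 + sizeof(int);   // "exclude the length": skip the 4-byte length

  std::printf("instance of 32 bytes copies %ld words\n", words_to_copy(32, instance_base)); // 3
  std::printf("array of 48 bytes copies %ld words\n",    words_to_copy(48, array_base));    // 4
  return 0;
}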

5307       PreserveJVMState pjvms(this);
5308       set_control(not_subtype_ctrl);
5309       // (At this point we can assume disjoint_bases, since types differ.)
5310       int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
5311       Node* p1 = basic_plus_adr(dest_klass, ek_offset);
5312       Node* n1 = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p1, TypeRawPtr::BOTTOM);
5313       Node* dest_elem_klass = _gvn.transform(n1);
5314       Node* cv = generate_checkcast_arraycopy(adr_type,
5315                                               dest_elem_klass,
5316                                               src, src_offset, dest, dest_offset,
5317                                               ConvI2X(copy_length), dest_uninitialized);
5318       if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
5319       checked_control = control();
5320       checked_i_o     = i_o();
5321       checked_mem     = memory(adr_type);
5322       checked_value   = cv;
5323     }
5324     // At this point we know we do not need type checks on oop stores.
5325 
5326     // Let's see if we need card marks:
5327     if (alloc != NULL && use_ReduceInitialCardMarks() && !UseShenandoahGC) {
5328       // If we do not need card marks, copy using the jint or jlong stub.
5329       copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
5330       assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
5331              "sizes agree");
5332     }
5333   }
5334 
5335   if (!stopped()) {
5336     // Generate the fast path, if possible.
5337     PreserveJVMState pjvms(this);
5338     generate_unchecked_arraycopy(adr_type, copy_type, disjoint_bases,
5339                                  src, src_offset, dest, dest_offset,
5340                                  ConvI2X(copy_length), dest_uninitialized);
5341 
5342     // Present the results of the fast call.
5343     result_region->init_req(fast_path, control());
5344     result_i_o   ->init_req(fast_path, i_o());
5345     result_memory->init_req(fast_path, memory(adr_type));
5346   }
5347 


6341 }
6342 
6343 //----------------------------inline_reference_get----------------------------
6344 // public T java.lang.ref.Reference.get();
6345 bool LibraryCallKit::inline_reference_get() {
6346   const int referent_offset = java_lang_ref_Reference::referent_offset;
6347   guarantee(referent_offset > 0, "should have already been set");
6348 
6349   // Get the argument:
6350   Node* reference_obj = null_check_receiver();
6351   if (stopped()) return true;
6352 
6353   Node* adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);
6354 
6355   ciInstanceKlass* klass = env()->Object_klass();
6356   const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
6357 
6358   Node* no_ctrl = NULL;
6359   Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT, MemNode::unordered);
6360 
6361 #if INCLUDE_ALL_GCS
6362   if (UseShenandoahGC) {
6363     result = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, result);
6364   }
6365 #endif
6366 
6367   // Use the pre-barrier to record the value in the referent field
6368   pre_barrier(false /* do_load */,
6369               control(),
6370               NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
6371               result /* pre_val */,
6372               T_OBJECT);
6373 
6374   // Add a memory barrier to prevent commoning reads from this field
6375   // across safepoints, since the GC can change its value.
6376   insert_mem_bar(Op_MemBarCPUOrder);
6377 
6378   set_result(result);
6379   return true;
6380 }
6381 
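For context on why inline_reference_get() emits a pre-barrier at all: a snapshot-at-the-beginning (SATB) marker, as used by G1 and Shenandoah, must be told about any reference that was reachable when marking started before the application can hide it, and returning the referent from Reference.get() is exactly such an escape. The toy model below only illustrates that logging idea; the types and queue are made up, and this is not HotSpot or Shenandoah code.

// Toy model (made-up types, not HotSpot code) of SATB logging on Reference.get:
// the returned referent is pushed onto a thread-local SATB queue so the
// concurrent marker still gets to see it.
#include <cstdio>
#include <vector>

struct Obj { const char* name; };

struct SATBQueue {
  std::vector<Obj*> buffer;                               // stands in for the SATB log buffer
  void enqueue(Obj* pre_val) {
    if (pre_val != nullptr) buffer.push_back(pre_val);    // "pre_barrier(pre_val)"
  }
};

struct Reference {
  Obj* referent;
  Obj* get(SATBQueue& satb) {
    Obj* result = referent;   // the load emitted by inline_reference_get()
    satb.enqueue(result);     // pre-barrier with do_load=false, pre_val=result
    return result;
  }
};

int main() {
  SATBQueue satb;
  Obj o{"referent"};
  Reference ref{&o};
  Obj* r = ref.get(satb);
  std::printf("returned %s, SATB queue holds %zu entry\n", r->name, satb.buffer.size());
  return 0;
}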
6382 
6383 Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
6384                                               bool is_exact=true, bool is_static=false) {
6385 
6386   const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();


6396 
6397   // The following code is copied from Parse::do_get_xxx():
6398 
6399   // Compute address and memory type.
6400   int offset  = field->offset_in_bytes();
6401   bool is_vol = field->is_volatile();
6402   ciType* field_klass = field->type();
6403   assert(field_klass->is_loaded(), "should be loaded");
6404   const TypePtr* adr_type = C->alias_type(field)->adr_type();
6405   Node *adr = basic_plus_adr(fromObj, fromObj, offset);
6406   BasicType bt = field->layout_type();
6407 
6408   // Build the resultant type of the load
6409   const Type *type;
6410   if (bt == T_OBJECT) {
6411     type = TypeOopPtr::make_from_klass(field_klass->as_klass());
6412   } else {
6413     type = Type::get_const_basic_type(bt);
6414   }
6415 
6416   Node* leading_membar = NULL;
6417   if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
6418     leading_membar = insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
6419   }
6420   // Build the load.
6421   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
6422   Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
6423 #if INCLUDE_ALL_GCS
6424   if (UseShenandoahGC && (bt == T_OBJECT || bt == T_ARRAY)) {
6425     loadedField = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, loadedField);
6426   }
6427 #endif
6428 
6429   // If reference is volatile, prevent following memory ops from
6430   // floating up past the volatile read.  Also prevents commoning
6431   // another volatile read.
6432   if (is_vol) {
6433     // Memory barrier includes bogus read of value to force load BEFORE membar
6434     Node* mb = insert_mem_bar(Op_MemBarAcquire, loadedField);
6435     mb->as_MemBar()->set_trailing_load();
6436   }
6437   return loadedField;
6438 }
6439 
6440 
6441 //------------------------------inline_aescrypt_Block-----------------------
6442 bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
6443   address stubAddr = NULL;
6444   const char *stubName;
6445   assert(UseAES, "need AES instruction support");
6446 
6447   switch(id) {
6448   case vmIntrinsics::_aescrypt_encryptBlock:
6449     stubAddr = StubRoutines::aescrypt_encryptBlock();
6450     stubName = "aescrypt_encryptBlock";
6451     break;
6452   case vmIntrinsics::_aescrypt_decryptBlock:
6453     stubAddr = StubRoutines::aescrypt_decryptBlock();
6454     stubName = "aescrypt_decryptBlock";
6455     break;

