src/share/vm/opto/library_call.cpp

  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "classfile/vmSymbols.hpp"
  28 #include "compiler/compileBroker.hpp"
  29 #include "compiler/compileLog.hpp"
  30 #include "jfr/support/jfrIntrinsics.hpp"
  31 #include "oops/objArrayKlass.hpp"
  32 #include "opto/addnode.hpp"
  33 #include "opto/callGenerator.hpp"
  34 #include "opto/cfgnode.hpp"
  35 #include "opto/connode.hpp"
  36 #include "opto/idealKit.hpp"
  37 #include "opto/mathexactnode.hpp"
  38 #include "opto/mulnode.hpp"
  39 #include "opto/parse.hpp"
  40 #include "opto/runtime.hpp"
  41 #include "opto/subnode.hpp"
  42 #include "prims/nativeLookup.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "utilities/macros.hpp"
  45 
  46 class LibraryIntrinsic : public InlineCallGenerator {
  47   // Extend the set of intrinsics known to the runtime:
  48  public:
  49  private:
  50   bool             _is_virtual;
  51   bool             _does_virtual_dispatch;
  52   int8_t           _predicates_count;  // Intrinsic is predicated by several conditions
  53   int8_t           _last_predicate; // Last generated predicate
  54   vmIntrinsics::ID _intrinsic_id;
  55 
  56  public:
  57   LibraryIntrinsic(ciMethod* m, bool is_virtual, int predicates_count, bool does_virtual_dispatch, vmIntrinsics::ID id)
  58     : InlineCallGenerator(m),
  59       _is_virtual(is_virtual),
  60       _does_virtual_dispatch(does_virtual_dispatch),
  61       _predicates_count((int8_t)predicates_count),
  62       _last_predicate((int8_t)-1),
  63       _intrinsic_id(id)
  64   {


2413   case vmIntrinsics::_reverseBytes_l:           n = new (C) ReverseBytesLNode( 0,   arg);  break;
2414   default:  fatal_unexpected_iid(id);  break;
2415   }
2416   set_result(_gvn.transform(n));
2417   return true;
2418 }
2419 
2420 //----------------------------inline_unsafe_access----------------------------
2421 
2422 const static BasicType T_ADDRESS_HOLDER = T_LONG;
2423 
2424 // Helper that guards and inserts a pre-barrier.
2425 void LibraryCallKit::insert_pre_barrier(Node* base_oop, Node* offset,
2426                                         Node* pre_val, bool need_mem_bar) {
2427   // We could be accessing the referent field of a reference object. If so, when G1
2428   // is enabled, we need to log the value in the referent field in an SATB buffer.
2429   // This routine performs some compile time filters and generates suitable
2430   // runtime filters that guard the pre-barrier code.
2431   // Also add memory barrier for non volatile load from the referent field
2432   // to prevent commoning of loads across safepoint.
2433   if (!UseG1GC && !need_mem_bar)
2434     return;
2435 
2436   // Some compile time checks.
2437 
2438   // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
2439   const TypeX* otype = offset->find_intptr_t_type();
2440   if (otype != NULL && otype->is_con() &&
2441       otype->get_con() != java_lang_ref_Reference::referent_offset) {
2442     // Constant offset but not the reference_offset so just return
2443     return;
2444   }
2445 
2446   // We only need to generate the runtime guards for instances.
2447   const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
2448   if (btype != NULL) {
2449     if (btype->isa_aryptr()) {
2450       // Array type so nothing to do
2451       return;
2452     }
2453 


2670     }
2671     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2672       // Don't intrinsify mismatched object accesses
2673       return false;
2674     }
2675     mismatched = (bt != type);
2676   }
2677 
2678   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2679 
2680   // First guess at the value type.
2681   const Type *value_type = Type::get_const_basic_type(type);
2682 
2683   // We will need memory barriers unless we can determine a unique
2684   // alias category for this reference.  (Note:  If for some reason
2685   // the barriers get omitted and the unsafe reference begins to "pollute"
2686   // the alias analysis of the rest of the graph, either Compile::can_alias
2687   // or Compile::must_alias will throw a diagnostic assert.)
2688   bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
2689 
2690   // If we are reading the value of the referent field of a Reference
2691   // object (either by using Unsafe directly or through reflection)
2692   // then, if G1 is enabled, we need to record the referent in an
2693   // SATB log buffer using the pre-barrier mechanism.
2694   // Also we need to add memory barrier to prevent commoning reads
2695   // from this field across safepoint since GC can change its value.
2696   bool need_read_barrier = !is_native_ptr && !is_store &&
2697                            offset != top() && heap_base_oop != top();
2698 
2699   if (!is_store && type == T_OBJECT) {
2700     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type, is_native_ptr);
2701     if (tjp != NULL) {
2702       value_type = tjp;
2703     }
2704   }
2705 
2706   receiver = null_check(receiver);
2707   if (stopped()) {
2708     return true;
2709   }


2729     } else {
2730       if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2731         leading_membar = insert_mem_bar(Op_MemBarVolatile);
2732       }
2733     }
2734   }
2735 
2736   // Memory barrier to prevent normal and 'unsafe' accesses from
2737   // bypassing each other.  Happens after null checks, so the
2738   // exception paths do not take memory state from the memory barrier,
2739   // so there's no problems making a strong assert about mixing users
2740   // of safe & unsafe memory.  Otherwise fails in a CTW of rt.jar
2741   // around 5701, class sun/reflect/UnsafeBooleanFieldAccessorImpl.
2742   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2743 
2744   if (!is_store) {
2745     MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
2746     // To be valid, unsafe loads may depend on other conditions than
2747     // the one that guards them: pin the Load node
2748     load = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched);
2749     // load value
2750     switch (type) {
2751     case T_BOOLEAN:
2752     case T_CHAR:
2753     case T_BYTE:
2754     case T_SHORT:
2755     case T_INT:
2756     case T_LONG:
2757     case T_FLOAT:
2758     case T_DOUBLE:
2759       break;
2760     case T_OBJECT:
2761       if (need_read_barrier) {
2762         insert_pre_barrier(heap_base_oop, offset, load, !(is_volatile || need_mem_bar));
2763       }
2764       break;
2765     case T_ADDRESS:
2766       // Cast to an int type.
2767       load = _gvn.transform(new (C) CastP2XNode(NULL, load));
2768       load = ConvX2UL(load);


2782     case T_DOUBLE:
2783       val = dstore_rounding(val);
2784       break;
2785     case T_ADDRESS:
2786       // Repackage the long as a pointer.
2787       val = ConvL2X(val);
2788       val = _gvn.transform(new (C) CastX2PNode(val));
2789       break;
2790     }
2791 
2792     MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
2793     if (type == T_OBJECT ) {
2794       store = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched);
2795     } else {
2796       store = store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile, unaligned, mismatched);
2797     }
2798   }
2799 
2800   if (is_volatile) {
2801     if (!is_store) {
2802       Node* mb = insert_mem_bar(Op_MemBarAcquire, load);
2803       mb->as_MemBar()->set_trailing_load();
2804     } else {
2805       if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2806         Node* mb = insert_mem_bar(Op_MemBarVolatile, store);
2807         MemBarNode::set_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());
2808       }
2809     }
2810   }
2811 
2812   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2813 
2814   return true;
2815 }
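
For the volatile store path above, the trailing Op_MemBarVolatile is paired with the leading membar created earlier in the function, so later passes can recognize the pair. Condensed, the sequence is roughly (a sketch of the code above, not meant to compile on its own):

    // leading_membar was created in the elided is_store branch further up
    store = store_to_memory(control(), adr, val, type, adr_type, MemNode::release, is_volatile, unaligned, mismatched);
    Node* mb = insert_mem_bar(Op_MemBarVolatile, store);
    MemBarNode::set_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());

On platforms where support_IRIW_for_not_multiple_copy_atomic_cpu is true the trailing barrier is omitted here, since each volatile load then emits its own leading Op_MemBarVolatile.
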
2816 
2817 //----------------------------inline_unsafe_prefetch----------------------------
2818 
2819 bool LibraryCallKit::inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static) {
2820 #ifndef PRODUCT
2821   {


3094     break;
3095   default:
3096     fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
3097     break;
3098   }
3099 
3100   // SCMemProjNodes represent the memory state of a LoadStore. Their
3101   // main role is to prevent LoadStore nodes from being optimized away
3102   // when their results aren't used.
3103   Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
3104   set_memory(proj, alias_idx);
3105 
3106   Node* access = load_store;
3107 
3108   if (type == T_OBJECT && kind == LS_xchg) {
3109 #ifdef _LP64
3110     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
3111       load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
3112     }
3113 #endif
3114     if (can_move_pre_barrier()) {
3115       // Don't need to load pre_val. The old value is returned by load_store.
3116       // The pre_barrier can execute after the xchg as long as no safepoint
3117       // gets inserted between them.
3118       pre_barrier(false /* do_load */,
3119                   control(), NULL, NULL, max_juint, NULL, NULL,
3120                   load_store /* pre_val */,
3121                   T_OBJECT);
3122     }
3123   }
3124 
3125   // Add the trailing membar surrounding the access
3126   insert_mem_bar(Op_MemBarCPUOrder);
3127   Node* mb = insert_mem_bar(Op_MemBarAcquire, access);
3128   MemBarNode::set_load_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());
3129 
3130   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
3131   set_result(load_store);
3132   return true;
3133 }


4529   // 16 - 64-bit VM, normal klass
4530   if (base_off % BytesPerLong != 0) {
4531     assert(UseCompressedClassPointers, "");
4532     if (is_array) {
4533       // Exclude length to copy by 8 bytes words.
4534       base_off += sizeof(int);
4535     } else {
4536       // Include klass to copy by 8 bytes words.
4537       base_off = instanceOopDesc::klass_offset_in_bytes();
4538     }
4539     assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
4540   }
4541   src  = basic_plus_adr(src,  base_off);
4542   dest = basic_plus_adr(dest, base_off);
4543 
4544   // Compute the length also, if needed:
4545   Node* countx = size;
4546   countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(base_off)));
4547   countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong) ));
4548 
4549   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4550   bool disjoint_bases = true;
4551   generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
4552                                src, NULL, dest, NULL, countx,
4553                                /*dest_uninitialized*/true);
4554 
4555   // If necessary, emit some card marks afterwards.  (Non-arrays only.)
4556   if (card_mark) {
4557     assert(!is_array, "");
4558     // Put in store barrier for any and all oops we are sticking
4559     // into this object.  (We could avoid this if we could prove
4560     // that the object type contains no oop fields at all.)
4561     Node* no_particular_value = NULL;
4562     Node* no_particular_field = NULL;
4563     int raw_adr_idx = Compile::AliasIdxRaw;
4564     post_barrier(control(),
4565                  memory(raw_adr_type),
4566                  alloc_obj,
4567                  no_particular_field,
4568                  raw_adr_idx,


5265       PreserveJVMState pjvms(this);
5266       set_control(not_subtype_ctrl);
5267       // (At this point we can assume disjoint_bases, since types differ.)
5268       int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
5269       Node* p1 = basic_plus_adr(dest_klass, ek_offset);
5270       Node* n1 = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p1, TypeRawPtr::BOTTOM);
5271       Node* dest_elem_klass = _gvn.transform(n1);
5272       Node* cv = generate_checkcast_arraycopy(adr_type,
5273                                               dest_elem_klass,
5274                                               src, src_offset, dest, dest_offset,
5275                                               ConvI2X(copy_length), dest_uninitialized);
5276       if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
5277       checked_control = control();
5278       checked_i_o     = i_o();
5279       checked_mem     = memory(adr_type);
5280       checked_value   = cv;
5281     }
5282     // At this point we know we do not need type checks on oop stores.
5283 
5284     // Let's see if we need card marks:
5285     if (alloc != NULL && use_ReduceInitialCardMarks()) {
5286       // If we do not need card marks, copy using the jint or jlong stub.
5287       copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
5288       assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
5289              "sizes agree");
5290     }
5291   }
5292 
5293   if (!stopped()) {
5294     // Generate the fast path, if possible.
5295     PreserveJVMState pjvms(this);
5296     generate_unchecked_arraycopy(adr_type, copy_type, disjoint_bases,
5297                                  src, src_offset, dest, dest_offset,
5298                                  ConvI2X(copy_length), dest_uninitialized);
5299 
5300     // Present the results of the fast call.
5301     result_region->init_req(fast_path, control());
5302     result_i_o   ->init_req(fast_path, i_o());
5303     result_memory->init_req(fast_path, memory(adr_type));
5304   }
5305 


6299 }
6300 
6301 //----------------------------inline_reference_get----------------------------
6302 // public T java.lang.ref.Reference.get();
6303 bool LibraryCallKit::inline_reference_get() {
6304   const int referent_offset = java_lang_ref_Reference::referent_offset;
6305   guarantee(referent_offset > 0, "should have already been set");
6306 
6307   // Get the argument:
6308   Node* reference_obj = null_check_receiver();
6309   if (stopped()) return true;
6310 
6311   Node* adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);
6312 
6313   ciInstanceKlass* klass = env()->Object_klass();
6314   const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
6315 
6316   Node* no_ctrl = NULL;
6317   Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT, MemNode::unordered);
6318 
6319   // Use the pre-barrier to record the value in the referent field
6320   pre_barrier(false /* do_load */,
6321               control(),
6322               NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
6323               result /* pre_val */,
6324               T_OBJECT);
6325 
6326   // Add memory barrier to prevent commoning reads from this field
6327   // across safepoint since GC can change its value.
6328   insert_mem_bar(Op_MemBarCPUOrder);
6329 
6330   set_result(result);
6331   return true;
6332 }
6333 
6334 
6335 Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
6336                                               bool is_exact=true, bool is_static=false) {
6337 
6338   const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();


6355   assert(field_klass->is_loaded(), "should be loaded");
6356   const TypePtr* adr_type = C->alias_type(field)->adr_type();
6357   Node *adr = basic_plus_adr(fromObj, fromObj, offset);
6358   BasicType bt = field->layout_type();
6359 
6360   // Build the resultant type of the load
6361   const Type *type;
6362   if (bt == T_OBJECT) {
6363     type = TypeOopPtr::make_from_klass(field_klass->as_klass());
6364   } else {
6365     type = Type::get_const_basic_type(bt);
6366   }
6367 
6368   Node* leading_membar = NULL;
6369   if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
6370     leading_membar = insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
6371   }
6372   // Build the load.
6373   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
6374   Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
6375   // If reference is volatile, prevent following memory ops from
6376   // floating up past the volatile read.  Also prevents commoning
6377   // another volatile read.
6378   if (is_vol) {
6379     // Memory barrier includes bogus read of value to force load BEFORE membar
6380     Node* mb = insert_mem_bar(Op_MemBarAcquire, loadedField);
6381     mb->as_MemBar()->set_trailing_load();
6382   }
6383   return loadedField;
6384 }
6385 
6386 
6387 //------------------------------inline_aescrypt_Block-----------------------
6388 bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
6389   address stubAddr = NULL;
6390   const char *stubName;
6391   assert(UseAES, "need AES instruction support");
6392 
6393   switch(id) {
6394   case vmIntrinsics::_aescrypt_encryptBlock:




  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "classfile/vmSymbols.hpp"
  28 #include "compiler/compileBroker.hpp"
  29 #include "compiler/compileLog.hpp"
  30 #include "jfr/support/jfrIntrinsics.hpp"
  31 #include "oops/objArrayKlass.hpp"
  32 #include "opto/addnode.hpp"
  33 #include "opto/callGenerator.hpp"
  34 #include "opto/cfgnode.hpp"
  35 #include "opto/connode.hpp"
  36 #include "opto/idealKit.hpp"
  37 #include "opto/mathexactnode.hpp"
  38 #include "opto/mulnode.hpp"
  39 #include "opto/parse.hpp"
  40 #include "opto/runtime.hpp"
  41 #include "opto/subnode.hpp"
  42 #include "prims/nativeLookup.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "utilities/macros.hpp"
  45 #if INCLUDE_ALL_GCS
  46 #include "gc_implementation/shenandoah/shenandoahRuntime.hpp"
  47 #include "gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  48 #include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp"
  49 #endif
  50 
  51 class LibraryIntrinsic : public InlineCallGenerator {
  52   // Extend the set of intrinsics known to the runtime:
  53  public:
  54  private:
  55   bool             _is_virtual;
  56   bool             _does_virtual_dispatch;
  57   int8_t           _predicates_count;  // Intrinsic is predicated by several conditions
  58   int8_t           _last_predicate; // Last generated predicate
  59   vmIntrinsics::ID _intrinsic_id;
  60 
  61  public:
  62   LibraryIntrinsic(ciMethod* m, bool is_virtual, int predicates_count, bool does_virtual_dispatch, vmIntrinsics::ID id)
  63     : InlineCallGenerator(m),
  64       _is_virtual(is_virtual),
  65       _does_virtual_dispatch(does_virtual_dispatch),
  66       _predicates_count((int8_t)predicates_count),
  67       _last_predicate((int8_t)-1),
  68       _intrinsic_id(id)
  69   {


2418   case vmIntrinsics::_reverseBytes_l:           n = new (C) ReverseBytesLNode( 0,   arg);  break;
2419   default:  fatal_unexpected_iid(id);  break;
2420   }
2421   set_result(_gvn.transform(n));
2422   return true;
2423 }
2424 
2425 //----------------------------inline_unsafe_access----------------------------
2426 
2427 const static BasicType T_ADDRESS_HOLDER = T_LONG;
2428 
2429 // Helper that guards and inserts a pre-barrier.
2430 void LibraryCallKit::insert_pre_barrier(Node* base_oop, Node* offset,
2431                                         Node* pre_val, bool need_mem_bar) {
2432   // We could be accessing the referent field of a reference object. If so, when G1
2433   // is enabled, we need to log the value in the referent field in an SATB buffer.
2434   // This routine performs some compile time filters and generates suitable
2435   // runtime filters that guard the pre-barrier code.
2436   // Also add memory barrier for non volatile load from the referent field
2437   // to prevent commoning of loads across safepoint.
2438   if (!(UseG1GC || UseShenandoahGC) && !need_mem_bar)
2439     return;
2440 
2441   // Some compile time checks.
2442 
2443   // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
2444   const TypeX* otype = offset->find_intptr_t_type();
2445   if (otype != NULL && otype->is_con() &&
2446       otype->get_con() != java_lang_ref_Reference::referent_offset) {
2447     // Constant offset but not the reference_offset so just return
2448     return;
2449   }
2450 
2451   // We only need to generate the runtime guards for instances.
2452   const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
2453   if (btype != NULL) {
2454     if (btype->isa_aryptr()) {
2455       // Array type so nothing to do
2456       return;
2457     }
2458 
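
Conceptually, the runtime filters this helper goes on to emit (in the part of the function not shown in this hunk) reduce to the following guard around the pre-barrier; this is a pseudocode sketch, not the actual IdealKit construction:

    // pseudocode for the generated runtime filter
    if (offset == java_lang_ref_Reference::referent_offset) {
      if (base_oop is an instance of java.lang.ref.Reference) {
        pre_barrier(..., pre_val /* value loaded from the referent field */, T_OBJECT);
      }
    }
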


2675     }
2676     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2677       // Don't intrinsify mismatched object accesses
2678       return false;
2679     }
2680     mismatched = (bt != type);
2681   }
2682 
2683   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2684 
2685   // First guess at the value type.
2686   const Type *value_type = Type::get_const_basic_type(type);
2687 
2688   // We will need memory barriers unless we can determine a unique
2689   // alias category for this reference.  (Note:  If for some reason
2690   // the barriers get omitted and the unsafe reference begins to "pollute"
2691   // the alias analysis of the rest of the graph, either Compile::can_alias
2692   // or Compile::must_alias will throw a diagnostic assert.)
2693   bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
2694 
2695 #if INCLUDE_ALL_GCS
2696   // Work around JDK-8220714 bug. This is done for Shenandoah only, until
2697   // the shared code fix is upstreamed and properly tested there.
2698   if (UseShenandoahGC) {
2699     need_mem_bar |= is_native_ptr;
2700   }
2701 #endif
2702 
2703   // If we are reading the value of the referent field of a Reference
2704   // object (either by using Unsafe directly or through reflection)
2705   // then, if G1 is enabled, we need to record the referent in an
2706   // SATB log buffer using the pre-barrier mechanism.
2707   // Also we need to add memory barrier to prevent commoning reads
2708   // from this field across safepoint since GC can change its value.
2709   bool need_read_barrier = !is_native_ptr && !is_store &&
2710                            offset != top() && heap_base_oop != top();
2711 
2712   if (!is_store && type == T_OBJECT) {
2713     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type, is_native_ptr);
2714     if (tjp != NULL) {
2715       value_type = tjp;
2716     }
2717   }
2718 
2719   receiver = null_check(receiver);
2720   if (stopped()) {
2721     return true;
2722   }
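
With the JDK-8220714 workaround above, a Shenandoah off-heap (is_native_ptr) unsafe access is fenced the same way as an access with an unknown alias category: need_mem_bar is forced true, so the Op_MemBarCPUOrder pair emitted further down brackets the access. Roughly (condensed from the code below, not compilable as-is):

    insert_mem_bar(Op_MemBarCPUOrder);   // before the access, because need_mem_bar is true
    // ... the unsafe load or store itself ...
    insert_mem_bar(Op_MemBarCPUOrder);   // after the access
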


2742     } else {
2743       if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2744         leading_membar = insert_mem_bar(Op_MemBarVolatile);
2745       }
2746     }
2747   }
2748 
2749   // Memory barrier to prevent normal and 'unsafe' accesses from
2750   // bypassing each other.  Happens after null checks, so the
2751   // exception paths do not take memory state from the memory barrier,
2752   // so there's no problems making a strong assert about mixing users
2753   // of safe & unsafe memory.  Otherwise fails in a CTW of rt.jar
2754   // around 5701, class sun/reflect/UnsafeBooleanFieldAccessorImpl.
2755   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2756 
2757   if (!is_store) {
2758     MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
2759     // To be valid, unsafe loads may depend on other conditions than
2760     // the one that guards them: pin the Load node
2761     load = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched);
2762 #if INCLUDE_ALL_GCS
2763     if (UseShenandoahGC && (type == T_OBJECT || type == T_ARRAY)) {
2764       load = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, load);
2765     }
2766 #endif
2767     // load value
2768     switch (type) {
2769     case T_BOOLEAN:
2770     case T_CHAR:
2771     case T_BYTE:
2772     case T_SHORT:
2773     case T_INT:
2774     case T_LONG:
2775     case T_FLOAT:
2776     case T_DOUBLE:
2777       break;
2778     case T_OBJECT:
2779       if (need_read_barrier) {
2780         insert_pre_barrier(heap_base_oop, offset, load, !(is_volatile || need_mem_bar));
2781       }
2782       break;
2783     case T_ADDRESS:
2784       // Cast to an int type.
2785       load = _gvn.transform(new (C) CastP2XNode(NULL, load));
2786       load = ConvX2UL(load);


2800     case T_DOUBLE:
2801       val = dstore_rounding(val);
2802       break;
2803     case T_ADDRESS:
2804       // Repackage the long as a pointer.
2805       val = ConvL2X(val);
2806       val = _gvn.transform(new (C) CastX2PNode(val));
2807       break;
2808     }
2809 
2810     MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
2811     if (type == T_OBJECT ) {
2812       store = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched);
2813     } else {
2814       store = store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile, unaligned, mismatched);
2815     }
2816   }
2817 
2818   if (is_volatile) {
2819     if (!is_store) {
2820 #if INCLUDE_ALL_GCS
2821       if (UseShenandoahGC) {
2822         load = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(load);
2823       }
2824 #endif
2825       Node* mb = insert_mem_bar(Op_MemBarAcquire, load);
2826       mb->as_MemBar()->set_trailing_load();
2827     } else {
2828       if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2829         Node* mb = insert_mem_bar(Op_MemBarVolatile, store);
2830         MemBarNode::set_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());
2831       }
2832     }
2833   }
2834 
2835   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2836 
2837   return true;
2838 }
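
For a volatile object load with Shenandoah enabled, the ordering established by this change is, in condensed form (a sketch of the code above, not compilable standalone):

    load = make_load(control(), adr, value_type, type, adr_type, MemNode::acquire, LoadNode::Pinned, ...);
    load = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, load);   // resolve the loaded oop
    // ...
    load = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(load);           // recover the underlying LoadNode
    Node* mb = insert_mem_bar(Op_MemBarAcquire, load);                           // trailing acquire
    mb->as_MemBar()->set_trailing_load();

The step_over_gc_barrier call presumably exists so the acquire barrier is attached to the underlying LoadNode rather than to the barrier node, since after this change the value flowing out of the intrinsic is the barrier node.
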
2839 
2840 //----------------------------inline_unsafe_prefetch----------------------------
2841 
2842 bool LibraryCallKit::inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static) {
2843 #ifndef PRODUCT
2844   {


3117     break;
3118   default:
3119     fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
3120     break;
3121   }
3122 
3123   // SCMemProjNodes represent the memory state of a LoadStore. Their
3124   // main role is to prevent LoadStore nodes from being optimized away
3125   // when their results aren't used.
3126   Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
3127   set_memory(proj, alias_idx);
3128 
3129   Node* access = load_store;
3130 
3131   if (type == T_OBJECT && kind == LS_xchg) {
3132 #ifdef _LP64
3133     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
3134       load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
3135     }
3136 #endif
3137 #if INCLUDE_ALL_GCS
3138     if (UseShenandoahGC) {
3139       load_store = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, load_store);
3140     }
3141 #endif
3142     if (can_move_pre_barrier()) {
3143       // Don't need to load pre_val. The old value is returned by load_store.
3144       // The pre_barrier can execute after the xchg as long as no safepoint
3145       // gets inserted between them.
3146       pre_barrier(false /* do_load */,
3147                   control(), NULL, NULL, max_juint, NULL, NULL,
3148                   load_store /* pre_val */,
3149                   T_OBJECT);
3150     }
3151   }
3152 
3153   // Add the trailing membar surrounding the access
3154   insert_mem_bar(Op_MemBarCPUOrder);
3155   Node* mb = insert_mem_bar(Op_MemBarAcquire, access);
3156   MemBarNode::set_load_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());
3157 
3158   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
3159   set_result(load_store);
3160   return true;
3161 }
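
For an object getAndSet (LS_xchg) the value handed back to Java is therefore produced by roughly this chain (condensed sketch of the code above; the pre-barrier call only happens when can_move_pre_barrier() is true):

    // raw LoadStore result, decoded on 64-bit with compressed oops
    load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
    // Shenandoah: resolve the previous value through the load-reference barrier
    load_store = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, load_store);
    // SATB: keep the previous value alive without re-loading it
    pre_barrier(false /* do_load */, control(), NULL, NULL, max_juint, NULL, NULL,
                load_store /* pre_val */, T_OBJECT);
    set_result(load_store);
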


4557   // 16 - 64-bit VM, normal klass
4558   if (base_off % BytesPerLong != 0) {
4559     assert(UseCompressedClassPointers, "");
4560     if (is_array) {
4561       // Exclude length to copy by 8 bytes words.
4562       base_off += sizeof(int);
4563     } else {
4564       // Include klass to copy by 8 bytes words.
4565       base_off = instanceOopDesc::klass_offset_in_bytes();
4566     }
4567     assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
4568   }
4569   src  = basic_plus_adr(src,  base_off);
4570   dest = basic_plus_adr(dest, base_off);
4571 
4572   // Compute the length also, if needed:
4573   Node* countx = size;
4574   countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(base_off)));
4575   countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong) ));
4576 
4577 #if INCLUDE_ALL_GCS
4578   if (UseShenandoahGC && ShenandoahCloneBarrier) {
4579     assert (src->is_AddP(), "for clone the src should be the interior ptr");
4580     assert (dest->is_AddP(), "for clone the dst should be the interior ptr");
4581 
4582     // Make sure that references in the cloned object are updated for Shenandoah.
4583     make_runtime_call(RC_LEAF|RC_NO_FP,
4584                       OptoRuntime::shenandoah_clone_barrier_Type(),
4585                       CAST_FROM_FN_PTR(address, ShenandoahRuntime::shenandoah_clone_barrier),
4586                       "shenandoah_clone_barrier", TypePtr::BOTTOM,
4587                       src->in(AddPNode::Base));
4588   }
4589 #endif
4590 
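
The copy addresses computed above are interior pointers (base_off past the object start), while the clone barrier needs the object itself, hence src->in(AddPNode::Base). A minimal sketch of the distinction, with hypothetical local names:

    Node* interior = basic_plus_adr(obj, obj, base_off);   // where the word-by-word copy starts
    Node* base     = interior->in(AddPNode::Base);         // the oop passed to the clone barrier call

For the length: if size == 32 and base_off == 16, then countx = (32 - 16) >> LogBytesPerLong == 2 eight-byte words.
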
4591   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4592   bool disjoint_bases = true;
4593   generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
4594                                src, NULL, dest, NULL, countx,
4595                                /*dest_uninitialized*/true);
4596 
4597   // If necessary, emit some card marks afterwards.  (Non-arrays only.)
4598   if (card_mark) {
4599     assert(!is_array, "");
4600     // Put in store barrier for any and all oops we are sticking
4601     // into this object.  (We could avoid this if we could prove
4602     // that the object type contains no oop fields at all.)
4603     Node* no_particular_value = NULL;
4604     Node* no_particular_field = NULL;
4605     int raw_adr_idx = Compile::AliasIdxRaw;
4606     post_barrier(control(),
4607                  memory(raw_adr_type),
4608                  alloc_obj,
4609                  no_particular_field,
4610                  raw_adr_idx,


5307       PreserveJVMState pjvms(this);
5308       set_control(not_subtype_ctrl);
5309       // (At this point we can assume disjoint_bases, since types differ.)
5310       int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
5311       Node* p1 = basic_plus_adr(dest_klass, ek_offset);
5312       Node* n1 = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p1, TypeRawPtr::BOTTOM);
5313       Node* dest_elem_klass = _gvn.transform(n1);
5314       Node* cv = generate_checkcast_arraycopy(adr_type,
5315                                               dest_elem_klass,
5316                                               src, src_offset, dest, dest_offset,
5317                                               ConvI2X(copy_length), dest_uninitialized);
5318       if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
5319       checked_control = control();
5320       checked_i_o     = i_o();
5321       checked_mem     = memory(adr_type);
5322       checked_value   = cv;
5323     }
5324     // At this point we know we do not need type checks on oop stores.
5325 
5326     // Let's see if we need card marks:
5327     if (alloc != NULL && use_ReduceInitialCardMarks() && ! UseShenandoahGC) {
5328       // If we do not need card marks, copy using the jint or jlong stub.
5329       copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
5330       assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
5331              "sizes agree");
5332     }
5333   }
5334 
5335   if (!stopped()) {
5336     // Generate the fast path, if possible.
5337     PreserveJVMState pjvms(this);
5338     generate_unchecked_arraycopy(adr_type, copy_type, disjoint_bases,
5339                                  src, src_offset, dest, dest_offset,
5340                                  ConvI2X(copy_length), dest_uninitialized);
5341 
5342     // Present the results of the fast call.
5343     result_region->init_req(fast_path, control());
5344     result_i_o   ->init_req(fast_path, i_o());
5345     result_memory->init_req(fast_path, memory(adr_type));
5346   }
5347 


6341 }
6342 
6343 //----------------------------inline_reference_get----------------------------
6344 // public T java.lang.ref.Reference.get();
6345 bool LibraryCallKit::inline_reference_get() {
6346   const int referent_offset = java_lang_ref_Reference::referent_offset;
6347   guarantee(referent_offset > 0, "should have already been set");
6348 
6349   // Get the argument:
6350   Node* reference_obj = null_check_receiver();
6351   if (stopped()) return true;
6352 
6353   Node* adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);
6354 
6355   ciInstanceKlass* klass = env()->Object_klass();
6356   const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
6357 
6358   Node* no_ctrl = NULL;
6359   Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT, MemNode::unordered);
6360 
6361 #if INCLUDE_ALL_GCS
6362   if (UseShenandoahGC) {
6363     result = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, result);
6364   }
6365 #endif
6366 
6367   // Use the pre-barrier to record the value in the referent field
6368   pre_barrier(false /* do_load */,
6369               control(),
6370               NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
6371               result /* pre_val */,
6372               T_OBJECT);
6373 
6374   // Add memory barrier to prevent commoning reads from this field
6375   // across safepoint since GC can change its value.
6376   insert_mem_bar(Op_MemBarCPUOrder);
6377 
6378   set_result(result);
6379   return true;
6380 }
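
Condensed, the sequence for Reference.get() under a SATB collector is (a sketch of the code above, not compilable on its own):

    Node* referent = make_load(NULL, adr, object_type, T_OBJECT, MemNode::unordered);
    referent = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, referent);  // Shenandoah only
    pre_barrier(false /* do_load */, control(), NULL, NULL, max_juint, NULL, NULL,
                referent /* pre_val */, T_OBJECT);   // record the referent so the collector keeps it alive
    insert_mem_bar(Op_MemBarCPUOrder);               // keep the read from being commoned across a safepoint
    set_result(referent);
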
6381 
6382 
6383 Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
6384                                               bool is_exact=true, bool is_static=false) {
6385 
6386   const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();


6403   assert(field_klass->is_loaded(), "should be loaded");
6404   const TypePtr* adr_type = C->alias_type(field)->adr_type();
6405   Node *adr = basic_plus_adr(fromObj, fromObj, offset);
6406   BasicType bt = field->layout_type();
6407 
6408   // Build the resultant type of the load
6409   const Type *type;
6410   if (bt == T_OBJECT) {
6411     type = TypeOopPtr::make_from_klass(field_klass->as_klass());
6412   } else {
6413     type = Type::get_const_basic_type(bt);
6414   }
6415 
6416   Node* leading_membar = NULL;
6417   if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
6418     leading_membar = insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
6419   }
6420   // Build the load.
6421   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
6422   Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
6423 #if INCLUDE_ALL_GCS
6424   if (UseShenandoahGC && (bt == T_OBJECT || bt == T_ARRAY)) {
6425     loadedField = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, loadedField);
6426   }
6427 #endif
6428 
6429   // If reference is volatile, prevent following memory ops from
6430   // floating up past the volatile read.  Also prevents commoning
6431   // another volatile read.
6432   if (is_vol) {
6433     // Memory barrier includes bogus read of value to force load BEFORE membar
6434     Node* mb = insert_mem_bar(Op_MemBarAcquire, loadedField);
6435     mb->as_MemBar()->set_trailing_load();
6436   }
6437   return loadedField;
6438 }
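
A hypothetical call site, shown only to illustrate the parameters (the receiver and field names here are made up, not taken from this file):

    // Load the non-static int[] field "state" from obj; is_exact narrows the receiver type to exactly its klass.
    Node* state = load_field_from_object(obj, "state", "[I",
                                         /*is_exact*/ true, /*is_static*/ false);

With Shenandoah the returned node is already routed through the load-reference barrier when the field is an object or array, so callers can use it directly.
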
6439 
6440 
6441 //------------------------------inline_aescrypt_Block-----------------------
6442 bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
6443   address stubAddr = NULL;
6444   const char *stubName;
6445   assert(UseAES, "need AES instruction support");
6446 
6447   switch(id) {
6448   case vmIntrinsics::_aescrypt_encryptBlock:

