src/hotspot/share/opto/library_call.cpp

@@ -55,10 +55,14 @@
 #include "prims/nativeLookup.hpp"
 #include "prims/unsafe.hpp"
 #include "runtime/objectMonitor.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "utilities/macros.hpp"
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+#endif
 
 
 class LibraryIntrinsic : public InlineCallGenerator {
   // Extend the set of intrinsics known to the runtime:
  public:

@@ -242,11 +246,11 @@
   bool inline_min_max(vmIntrinsics::ID id);
   bool inline_notify(vmIntrinsics::ID id);
   Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
   // This returns Type::AnyPtr, RawPtr, or OopPtr.
   int classify_unsafe_addr(Node* &base, Node* &offset, BasicType type);
-  Node* make_unsafe_address(Node*& base, Node* offset, BasicType type = T_ILLEGAL, bool can_cast = false);
+  Node* make_unsafe_address(Node*& base, Node* offset, bool is_store, BasicType type = T_ILLEGAL, bool can_cast = false);
 
   typedef enum { Relaxed, Opaque, Volatile, Acquire, Release } AccessKind;
   DecoratorSet mo_decorator_for_access_kind(AccessKind kind);
   bool inline_unsafe_access(bool is_store, BasicType type, AccessKind kind, bool is_unaligned);
   static bool klass_needs_init_guard(Node* kls);

@@ -333,10 +337,14 @@
     if (UseAVX >= 2) {
       C->set_clear_upper_avx(true);
     }
 #endif
   }
+
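+  // Applies the not-null cast only when running Shenandoah; other GCs get the node back unchanged.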
+  Node* shenandoah_must_be_not_null(Node* n, bool do_replace_in_map) {
+    return UseShenandoahGC ? must_be_not_null(n, do_replace_in_map) : n;
+  }
 };
 
 //---------------------------make_vm_intrinsic----------------------------
 CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
   vmIntrinsics::ID id = m->intrinsic_id();

@@ -1105,10 +1113,16 @@
 //------------------------------inline_string_compareTo------------------------
 bool LibraryCallKit::inline_string_compareTo(StrIntrinsicNode::ArgEnc ae) {
   Node* arg1 = argument(0);
   Node* arg2 = argument(1);
 
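+  // Shenandoah: pin both arguments as not-null and resolve them through read barriers
+  // before the raw element addresses are computed below.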
+  arg1 = shenandoah_must_be_not_null(arg1, true);
+  arg2 = shenandoah_must_be_not_null(arg2, true);
+
+  arg1 = access_resolve_for_read(arg1);
+  arg2 = access_resolve_for_read(arg2);
+
   // Get start addr and length of first argument
   Node* arg1_start  = array_element_address(arg1, intcon(0), T_BYTE);
   Node* arg1_cnt    = load_array_length(arg1);
 
   // Get start addr and length of second argument

@@ -1128,11 +1142,18 @@
   // paths (plus control) merge
   RegionNode* region = new RegionNode(3);
   Node* phi = new PhiNode(region, TypeInt::BOOL);
 
   if (!stopped()) {
-    // Get start addr and length of first argument
+
+    arg1 = shenandoah_must_be_not_null(arg1, true);
+    arg2 = shenandoah_must_be_not_null(arg2, true);
+
+    arg1 = access_resolve_for_read(arg1);
+    arg2 = access_resolve_for_read(arg2);
+
+    // Get start addr and length of first argument
     Node* arg1_start  = array_element_address(arg1, intcon(0), T_BYTE);
     Node* arg1_cnt    = load_array_length(arg1);
 
     // Get start addr and length of second argument
     Node* arg2_start  = array_element_address(arg2, intcon(0), T_BYTE);

@@ -1168,10 +1189,13 @@
 bool LibraryCallKit::inline_array_equals(StrIntrinsicNode::ArgEnc ae) {
   assert(ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::LL, "unsupported array types");
   Node* arg1 = argument(0);
   Node* arg2 = argument(1);
 
+  arg1 = access_resolve_for_read(arg1);
+  arg2 = access_resolve_for_read(arg2);
+
   const TypeAryPtr* mtype = (ae == StrIntrinsicNode::UU) ? TypeAryPtr::CHARS : TypeAryPtr::BYTES;
   set_result(_gvn.transform(new AryEqNode(control(), memory(mtype), arg1, arg2, ae)));
   clear_upper_avx();
 
   return true;

@@ -1187,15 +1211,20 @@
   // no receiver since it is static method
   Node* ba         = argument(0);
   Node* offset     = argument(1);
   Node* len        = argument(2);
 
+  ba = shenandoah_must_be_not_null(ba, true);
+
   // Range checks
   generate_string_range_check(ba, offset, len, false);
   if (stopped()) {
     return true;
   }
+
+  ba = access_resolve_for_read(ba);
+
   Node* ba_start = array_element_address(ba, offset, T_BYTE);
   Node* result = new HasNegativesNode(control(), memory(TypeAryPtr::BYTES), ba_start, len);
   set_result(_gvn.transform(result));
   return true;
 }

@@ -1259,10 +1288,16 @@
 
   // Make the merge point
   RegionNode* result_rgn = new RegionNode(4);
   Node*       result_phi = new PhiNode(result_rgn, TypeInt::INT);
 
+  src = shenandoah_must_be_not_null(src, true);
+  tgt = shenandoah_must_be_not_null(tgt, true);
+
+  src = access_resolve_for_read(src);
+  tgt = access_resolve_for_read(tgt);
+
   // Get start addr and length of source string
   Node* src_start = array_element_address(src, intcon(0), T_BYTE);
   Node* src_count = load_array_length(src);
 
   // Get start addr and length of substring

@@ -1303,10 +1338,16 @@
   Node* src_count   = argument(1); // char count
   Node* tgt         = argument(2); // byte[]
   Node* tgt_count   = argument(3); // char count
   Node* from_index  = argument(4); // char index
 
+  src = shenandoah_must_be_not_null(src, true);
+  tgt = shenandoah_must_be_not_null(tgt, true);
+
+  src = access_resolve_for_read(src);
+  tgt = access_resolve_for_read(tgt);
+
   // Multiply byte array index by 2 if String is UTF16 encoded
   Node* src_offset = (ae == StrIntrinsicNode::LL) ? from_index : _gvn.transform(new LShiftINode(from_index, intcon(1)));
   src_count = _gvn.transform(new SubINode(src_count, from_index));
   Node* src_start = array_element_address(src, src_offset, T_BYTE);
   Node* tgt_start = array_element_address(tgt, intcon(0), T_BYTE);

@@ -1388,10 +1429,13 @@
   Node* src         = argument(0); // byte[]
   Node* tgt         = argument(1); // tgt is int ch
   Node* from_index  = argument(2);
   Node* max         = argument(3);
 
+  src = shenandoah_must_be_not_null(src, true);
+  src = access_resolve_for_read(src);
+
   Node* src_offset = _gvn.transform(new LShiftINode(from_index, intcon(1)));
   Node* src_start = array_element_address(src, src_offset, T_BYTE);
   Node* src_count = _gvn.transform(new SubINode(max, from_index));
 
   // Range checks

@@ -1458,10 +1502,13 @@
   BasicType dst_elem = dst_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
   assert((compress && dst_elem == T_BYTE && (src_elem == T_BYTE || src_elem == T_CHAR)) ||
          (!compress && src_elem == T_BYTE && (dst_elem == T_BYTE || dst_elem == T_CHAR)),
          "Unsupported array types for inline_string_copy");
 
+  src = shenandoah_must_be_not_null(src, true);
+  dst = shenandoah_must_be_not_null(dst, true);
+
   // Convert char[] offsets to byte[] offsets
   bool convert_src = (compress && src_elem == T_BYTE);
   bool convert_dst = (!compress && dst_elem == T_BYTE);
   if (convert_src) {
     src_offset = _gvn.transform(new LShiftINode(src_offset, intcon(1)));

@@ -1474,10 +1521,13 @@
   generate_string_range_check(dst, dst_offset, length, convert_dst);
   if (stopped()) {
     return true;
   }
 
+  src = access_resolve_for_read(src);
+  dst = access_resolve_for_write(dst);
+
   Node* src_start = array_element_address(src, src_offset, src_elem);
   Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
   // 'src_start' points to src array + scaled offset
   // 'dst_start' points to dst array + scaled offset
   Node* count = NULL;

@@ -1564,10 +1614,12 @@
     Node* klass_node = makecon(TypeKlassPtr::make(ciTypeArrayKlass::make(T_BYTE)));
     newcopy = new_array(klass_node, size, 0);  // no arguments to push
     AllocateArrayNode* alloc = tightly_coupled_allocation(newcopy, NULL);
 
     // Calculate starting addresses.
+    value = access_resolve_for_read(value);
+
     Node* src_start = array_element_address(value, offset, T_CHAR);
     Node* dst_start = basic_plus_adr(newcopy, arrayOopDesc::base_offset_in_bytes(T_BYTE));
 
     // Check if src array address is aligned to HeapWordSize (dst is always aligned)
     const TypeInt* toffset = gvn().type(offset)->is_int();

@@ -1647,10 +1699,14 @@
   if (stopped()) {
     return true;
   }
 
   if (!stopped()) {
+
+    src = access_resolve_for_read(src);
+    dst = access_resolve_for_write(dst);
+
     // Calculate starting addresses.
     Node* src_start = array_element_address(src, src_begin, T_BYTE);
     Node* dst_start = array_element_address(dst, dst_begin, T_CHAR);
 
     // Check if array addresses are aligned to HeapWordSize

@@ -1714,10 +1770,18 @@
   // Java method would constant fold nicely instead.
   if (!is_store && value->is_Con() && index->is_Con()) {
     return false;
   }
 
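+  // The value array is resolved for write on the store path and for read on the load path.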
+  value = shenandoah_must_be_not_null(value, true);
+
+  if (is_store) {
+    value = access_resolve_for_write(value);
+  } else {
+    value = access_resolve_for_read(value);
+  }
+
   Node* adr = array_element_address(value, index, T_CHAR);
   if (adr->is_top()) {
     return false;
   }
   if (is_store) {

@@ -2151,11 +2215,11 @@
     // No information:
     return Type::AnyPtr;
   }
 }
 
-inline Node* LibraryCallKit::make_unsafe_address(Node*& base, Node* offset, BasicType type, bool can_cast) {
+inline Node* LibraryCallKit::make_unsafe_address(Node*& base, Node* offset, bool is_store, BasicType type, bool can_cast) {
   Node* uncasted_base = base;
   int kind = classify_unsafe_addr(uncasted_base, offset, type);
   if (kind == Type::RawPtr) {
     return basic_plus_adr(top(), uncasted_base, offset);
   } else if (kind == Type::AnyPtr) {

@@ -2167,11 +2231,22 @@
         // heap. Casting the base to not null and thus avoiding membars
         // around the access should allow better optimizations
         Node* null_ctl = top();
         base = null_check_oop(base, &null_ctl, true, true, true);
         assert(null_ctl->is_top(), "no null control here");
-        return basic_plus_adr(base, offset);
+        Node* new_base = base;
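+        // Shenandoah: resolve the base oop through the matching read or write barrier
+        // before forming the raw address.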
+#if INCLUDE_SHENANDOAHGC
+        if (UseShenandoahGC &&
+            ((ShenandoahWriteBarrier && is_store) || (ShenandoahReadBarrier && !is_store))) {
+          if (is_store) {
+            new_base = access_resolve_for_write(base);
+          } else {
+            new_base = access_resolve_for_read(base);
+          }
+        }
+#endif
+        return basic_plus_adr(new_base, offset);
       } else if (_gvn.type(base)->speculative_always_null() &&
                  !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
         // According to profiling, this access is always off
         // heap.
         base = null_assert(base);

@@ -2180,19 +2255,41 @@
         return basic_plus_adr(top(), raw_base, offset);
       }
     }
     // We don't know if it's an on heap or off heap access. Fall back
     // to raw memory access.
-    Node* raw = _gvn.transform(new CheckCastPPNode(control(), base, TypeRawPtr::BOTTOM));
+    Node* new_base = base;
+#if INCLUDE_SHENANDOAHGC
+    if (UseShenandoahGC &&
+            ((ShenandoahWriteBarrier && is_store) || (ShenandoahReadBarrier && !is_store))) {
+      if (is_store) {
+        new_base = access_resolve_for_write(base);
+      } else {
+        new_base = access_resolve_for_read(base);
+      }
+    }
+#endif
+    Node* raw = _gvn.transform(new CheckCastPPNode(control(), new_base, TypeRawPtr::BOTTOM));
     return basic_plus_adr(top(), raw, offset);
   } else {
     assert(base == uncasted_base, "unexpected base change");
     // We know it's an on heap access so base can't be null
     if (TypePtr::NULL_PTR->higher_equal(_gvn.type(base))) {
       base = must_be_not_null(base, true);
     }
-    return basic_plus_adr(base, offset);
+    Node* new_base = base;
+#if INCLUDE_SHENANDOAHGC
+    if (UseShenandoahGC &&
+            ((ShenandoahWriteBarrier && is_store) || (ShenandoahReadBarrier && !is_store))) {
+      if (is_store) {
+        new_base = access_resolve_for_write(base);
+      } else {
+        new_base = access_resolve_for_read(base);
+      }
+    }
+#endif
+    return basic_plus_adr(new_base, offset);
   }
 }
 
 //--------------------------inline_number_methods-----------------------------
 // inline int     Integer.numberOfLeadingZeros(int)

@@ -2346,12 +2443,11 @@
   // by oopDesc::field_addr.
   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
          "fieldOffset must be byte-scaled");
   // 32-bit machines ignore the high half!
   offset = ConvL2X(offset);
-  adr = make_unsafe_address(base, offset, type, kind == Relaxed);
-
+  adr = make_unsafe_address(base, offset, is_store, type, kind == Relaxed);
   if (_gvn.type(base)->isa_ptr() != TypePtr::NULL_PTR) {
     heap_base_oop = base;
   } else if (type == T_OBJECT) {
     return false; // off-heap oop accesses are not supported
   }

@@ -2630,11 +2726,11 @@
   // to be plain byte offsets, which are also the same as those accepted
   // by oopDesc::field_addr.
   assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
   // 32-bit machines ignore the high half of long offsets
   offset = ConvL2X(offset);
-  Node* adr = make_unsafe_address(base, offset, type, false);
+  Node* adr = make_unsafe_address(base, offset, true, type, false);
   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
 
   Compile::AliasType* alias_type = C->alias_type(adr_type);
   BasicType bt = alias_type->basic_type();
   if (bt != T_ILLEGAL &&

@@ -2925,10 +3021,13 @@
 
   // (a) Receiving thread must be the current thread.
   Node* rec_thr = argument(0);
   Node* tls_ptr = NULL;
   Node* cur_thr = generate_current_thread(tls_ptr);
+
+  cur_thr = access_resolve_for_write(cur_thr);
+  rec_thr = access_resolve_for_write(rec_thr);
   Node* cmp_thr = _gvn.transform(new CmpPNode(cur_thr, rec_thr));
   Node* bol_thr = _gvn.transform(new BoolNode(cmp_thr, BoolTest::ne));
 
   generate_slow_guard(bol_thr, slow_region);
 

@@ -3340,11 +3439,13 @@
     PATH_LIMIT
   };
 
   RegionNode* region = new RegionNode(PATH_LIMIT);
   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
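+  // The merged paths below can now differ in memory state, so memory is merged with its own phi.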
+  Node*       mem_phi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
   record_for_igvn(region);
+  Node* init_mem = map()->memory();
 
   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
   const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
   int class_klass_offset = java_lang_Class::klass_offset_in_bytes();
 

@@ -3359,10 +3460,13 @@
     Node* p = basic_plus_adr(arg, class_klass_offset);
     Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type);
     klasses[which_arg] = _gvn.transform(kls);
   }
 
+  args[0] = access_resolve_for_write(args[0]);
+  args[1] = access_resolve_for_write(args[1]);
+
   // Having loaded both klasses, test each for null.
   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
   for (which_arg = 0; which_arg <= 1; which_arg++) {
     Node* kls = klasses[which_arg];
     Node* null_ctl = top();

@@ -3403,22 +3507,28 @@
   phi->set_req(_prim_same_path,   intcon(1));
   phi->set_req(_ref_subtype_path, intcon(1));
 
   // pull together the cases:
   assert(region->req() == PATH_LIMIT, "sane region");
+  Node* cur_mem = reset_memory();
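+  // cur_mem is the memory state after the checks above; init_mem was captured on entry.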
   for (uint i = 1; i < region->req(); i++) {
     Node* ctl = region->in(i);
     if (ctl == NULL || ctl == top()) {
       region->set_req(i, top());
       phi   ->set_req(i, top());
-    } else if (phi->in(i) == NULL) {
-      phi->set_req(i, intcon(0)); // all other paths produce 'false'
+      mem_phi->set_req(i, top());
+    } else {
+      if (phi->in(i) == NULL) {
+        phi->set_req(i, intcon(0)); // all other paths produce 'false'
+      }
+      mem_phi->set_req(i, (i == _prim_0_path || i == _prim_same_path) ? cur_mem : init_mem);
     }
   }
 
   set_control(_gvn.transform(region));
   set_result(_gvn.transform(phi));
+  set_all_memory(_gvn.transform(mem_phi));
   return true;
 }
 
 //---------------------generate_array_guard_common------------------------
 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,

@@ -3644,10 +3754,12 @@
       // How many elements will we copy from the original?
       // The answer is MinI(orig_length - start, length).
       Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
 
+      original = access_resolve_for_read(original);
+
       // Generate a direct call to the right arraycopy function(s).
       // We know the copy is disjoint but we might not know if the
       // oop stores need checking.
       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
       // This will fail a store-check if x contains any non-nulls.

@@ -3699,10 +3811,13 @@
         } else {
           ac->set_copyofrange(validated);
         }
         Node* n = _gvn.transform(ac);
         if (n == ac) {
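+          // Shenandoah: widen the slice to all of memory before wiring up the arraycopy's outputs.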
+          if (UseShenandoahGC) {
+            ac->_adr_type = TypePtr::BOTTOM;
+          }
           ac->connect_outputs(this);
         } else {
           assert(validated, "shouldn't transform if all arguments not validated");
           set_all_memory(n);
         }

@@ -4134,12 +4249,12 @@
   Node* size    = ConvL2X(argument(7));  // type: long
 
   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
          "fieldOffset must be byte-scaled");
 
-  Node* src = make_unsafe_address(src_ptr, src_off);
-  Node* dst = make_unsafe_address(dst_ptr, dst_off);
+  Node* src = make_unsafe_address(src_ptr, src_off, false);
+  Node* dst = make_unsafe_address(dst_ptr, dst_off, true);
 
   // Conservatively insert a memory barrier on all memory slices.
   // Do not let writes of the copy source or destination float below the copy.
   insert_mem_bar(Op_MemBarCPUOrder);
 

@@ -4162,10 +4277,12 @@
 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
   assert(obj_size != NULL, "");
   Node* raw_obj = alloc_obj->in(1);
   assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
 
+  obj = access_resolve_for_read(obj);
+
   AllocateNode* alloc = NULL;
   if (ReduceBulkZeroing) {
     // We will be completely responsible for initializing this object -
     // mark Initialize node as complete.
     alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);

@@ -4286,16 +4403,22 @@
         // because gc barriers are required when accessing the array.
         Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
         if (is_obja != NULL) {
           PreserveJVMState pjvms2(this);
           set_control(is_obja);
+
+          obj = access_resolve_for_read(obj);
+
           // Generate a direct call to the right arraycopy function(s).
           Node* alloc = tightly_coupled_allocation(alloc_obj, NULL);
           ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, alloc != NULL, false);
           ac->set_cloneoop();
           Node* n = _gvn.transform(ac);
           assert(n == ac, "cannot disappear");
+          if (UseShenandoahGC) {
+            ac->_adr_type = TypePtr::BOTTOM;
+          }
           ac->connect_outputs(this);
 
           result_reg->init_req(_objArray_path, control());
           result_val->init_req(_objArray_path, alloc_obj);
           result_i_o ->set_req(_objArray_path, i_o());

@@ -4411,20 +4534,20 @@
       Node* mem = alloc->in(TypeFunc::Memory);
       if (mem->is_MergeMem()) {
         for (MergeMemStream mms(merged_memory(), mem->as_MergeMem()); mms.next_non_empty2(); ) {
           Node* n = mms.memory();
           if (n != mms.memory2() && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
-            assert(n->is_Store(), "what else?");
+            assert(n->is_Store() || n->Opcode() == Op_ShenandoahWBMemProj, "what else?");
             no_interfering_store = false;
             break;
           }
         }
       } else {
         for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
           Node* n = mms.memory();
           if (n != mem && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
-            assert(n->is_Store(), "what else?");
+            assert(n->is_Store() || n->Opcode() == Op_ShenandoahWBMemProj, "what else?");
             no_interfering_store = false;
             break;
           }
         }
       }

@@ -4761,21 +4884,27 @@
 
   if (stopped()) {
     return true;
   }
 
-  ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != NULL, negative_length_guard_generated,
+  Node* new_src = access_resolve_for_read(src);
+  Node* new_dest = access_resolve_for_write(dest);
+
+  ArrayCopyNode* ac = ArrayCopyNode::make(this, true, new_src, src_offset, new_dest, dest_offset, length, alloc != NULL, negative_length_guard_generated,
                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
                                           // so the compiler has a chance to eliminate them: during macro expansion,
                                           // we have to set their control (CastPP nodes are eliminated).
                                           load_object_klass(src), load_object_klass(dest),
                                           load_array_length(src), load_array_length(dest));
 
   ac->set_arraycopy(validated);
 
   Node* n = _gvn.transform(ac);
   if (n == ac) {
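+    // Shenandoah: treat the arraycopy as touching all of memory, as above.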
+    if (UseShenandoahGC) {
+      ac->_adr_type = TypePtr::BOTTOM;
+    }
     ac->connect_outputs(this);
   } else {
     assert(validated, "shouldn't transform if all arguments not validated");
     set_all_memory(n);
   }

@@ -4792,10 +4921,14 @@
 LibraryCallKit::tightly_coupled_allocation(Node* ptr,
                                            RegionNode* slow_region) {
   if (stopped())             return NULL;  // no fast path
   if (C->AliasLevel() == 0)  return NULL;  // no MergeMems around
 
+#if INCLUDE_SHENANDOAHGC
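+  // Step through a Shenandoah barrier, if any, so the allocation underneath can still be matched.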
+  ptr = ShenandoahBarrierNode::skip_through_barrier(ptr);
+#endif
+
   AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, &_gvn);
   if (alloc == NULL)  return NULL;
 
   Node* rawmem = memory(Compile::AliasIdxRaw);
   // Is the allocation's memory state untouched?

@@ -4871,10 +5004,16 @@
   Node *src_offset  = argument(1);
   Node *dst         = argument(2);
   Node *dst_offset  = argument(3);
   Node *length      = argument(4);
 
+  src = shenandoah_must_be_not_null(src, true);
+  dst = shenandoah_must_be_not_null(dst, true);
+
+  src = access_resolve_for_read(src);
+  dst = access_resolve_for_write(dst);
+
   const Type* src_type = src->Value(&_gvn);
   const Type* dst_type = dst->Value(&_gvn);
   const TypeAryPtr* top_src = src_type->isa_aryptr();
   const TypeAryPtr* top_dest = dst_type->isa_aryptr();
   if (top_src  == NULL || top_src->klass()  == NULL ||

@@ -4923,10 +5062,16 @@
   Node* xlen = argument(1);
   Node* y    = argument(2);
   Node* ylen = argument(3);
   Node* z    = argument(4);
 
+  x = shenandoah_must_be_not_null(x, true);
+  x = access_resolve_for_read(x);
+  y = shenandoah_must_be_not_null(y, true);
+  y = access_resolve_for_read(y);
+  z = access_resolve_for_write(z);
+
   const Type* x_type = x->Value(&_gvn);
   const Type* y_type = y->Value(&_gvn);
   const TypeAryPtr* top_x = x_type->isa_aryptr();
   const TypeAryPtr* top_y = y_type->isa_aryptr();
   if (top_x  == NULL || top_x->klass()  == NULL ||

@@ -4968,11 +5113,21 @@
      __ if_then(z, BoolTest::eq, null()); {
        __ increment (need_alloc, one);
      } __ else_(); {
        // Update graphKit memory and control from IdealKit.
        sync_kit(ideal);
-       Node* zlen_arg = load_array_length(z);
+       Node* zlen_arg = NULL;
+       if (UseShenandoahGC) {
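+         // On this branch z is known to be non-null; record that with a CastPP
+         // before loading the array length.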
+         Node* cast = new CastPPNode(z, TypePtr::NOTNULL);
+         cast->init_req(0, control());
+         _gvn.set_type(cast, cast->bottom_type());
+         C->record_for_igvn(cast);
+
+         zlen_arg = load_array_length(cast);
+       } else {
+         zlen_arg = load_array_length(z);
+       }
        // Update IdealKit memory and control from graphKit.
        __ sync_kit(this);
        __ if_then(zlen_arg, BoolTest::lt, zlen); {
          __ increment (need_alloc, one);
        } __ end_if();

@@ -5023,10 +5178,15 @@
   Node* x    = argument(0);
   Node* len  = argument(1);
   Node* z    = argument(2);
   Node* zlen = argument(3);
 
+  x = shenandoah_must_be_not_null(x, true);
+  x = access_resolve_for_read(x);
+  z = shenandoah_must_be_not_null(z, true);
+  z = access_resolve_for_write(z);
+
   const Type* x_type = x->Value(&_gvn);
   const Type* z_type = z->Value(&_gvn);
   const TypeAryPtr* top_x = x_type->isa_aryptr();
   const TypeAryPtr* top_z = z_type->isa_aryptr();
   if (top_x  == NULL || top_x->klass()  == NULL ||

@@ -5070,10 +5230,14 @@
   Node* in       = argument(1);
   Node* offset   = argument(2);
   Node* len      = argument(3);
   Node* k        = argument(4);
 
+  in = access_resolve_for_read(in);
+  out = shenandoah_must_be_not_null(out, true);
+  out = access_resolve_for_write(out);
+
   const Type* out_type = out->Value(&_gvn);
   const Type* in_type = in->Value(&_gvn);
   const TypeAryPtr* top_out = out_type->isa_aryptr();
   const TypeAryPtr* top_in = in_type->isa_aryptr();
   if (top_out  == NULL || top_out->klass()  == NULL ||

@@ -5119,10 +5283,15 @@
   Node* n    = argument(2);
   Node* len  = argument(3);
   Node* inv  = argument(4);
   Node* m    = argument(6);
 
+  a = access_resolve_for_read(a);
+  b = access_resolve_for_read(b);
+  n = access_resolve_for_read(n);
+  m = access_resolve_for_write(m);
+
   const Type* a_type = a->Value(&_gvn);
   const TypeAryPtr* top_a = a_type->isa_aryptr();
   const Type* b_type = b->Value(&_gvn);
   const TypeAryPtr* top_b = b_type->isa_aryptr();
   const Type* n_type = a->Value(&_gvn);

@@ -5178,10 +5347,14 @@
   Node* n    = argument(1);
   Node* len  = argument(2);
   Node* inv  = argument(3);
   Node* m    = argument(5);
 
+  a = access_resolve_for_read(a);
+  n = access_resolve_for_read(n);
+  m = access_resolve_for_write(m);
+
   const Type* a_type = a->Value(&_gvn);
   const TypeAryPtr* top_a = a_type->isa_aryptr();
   const Type* n_type = a->Value(&_gvn);
   const TypeAryPtr* top_n = n_type->isa_aryptr();
   const Type* m_type = a->Value(&_gvn);

@@ -5247,12 +5420,12 @@
   }
 
   Node* call;
   jvms()->set_should_reexecute(true);
 
-  Node* obja_adr = make_unsafe_address(obja, aoffset);
-  Node* objb_adr = make_unsafe_address(objb, boffset);
+  Node* obja_adr = make_unsafe_address(obja, aoffset, false);
+  Node* objb_adr = make_unsafe_address(objb, boffset, false);
 
   call = make_runtime_call(RC_LEAF,
     OptoRuntime::vectorizedMismatch_Type(),
     stubAddr, stubName, TypePtr::BOTTOM,
     obja_adr, objb_adr, length, scale);

@@ -5322,10 +5495,12 @@
   if (src_elem != T_BYTE) {
     return false;
   }
 
   // 'src_start' points to src array + scaled offset
+  src = shenandoah_must_be_not_null(src, true);
+  src = access_resolve_for_read(src);
   Node* src_start = array_element_address(src, offset, src_elem);
 
   // We assume that range check is done by caller.
   // TODO: generate range check (offset+length < src.length) in debug VM.
 

@@ -5410,14 +5585,18 @@
   if (src_elem != T_BYTE) {
     return false;
   }
 
   // 'src_start' points to src array + scaled offset
+  src = shenandoah_must_be_not_null(src, true);
+  src = access_resolve_for_read(src);
   Node* src_start = array_element_address(src, offset, src_elem);
 
   // static final int[] byteTable in class CRC32C
   Node* table = get_table_from_crc32c_class(callee()->holder());
+  table = shenandoah_must_be_not_null(table, true);
+  table = access_resolve_for_read(table);
   Node* table_start = array_element_address(table, intcon(0), T_INT);
 
   // We assume that range check is done by caller.
   // TODO: generate range check (offset+length < src.length) in debug VM.
 

@@ -5457,10 +5636,12 @@
   // 'src_start' points to src array + scaled offset
   Node* src_start = basic_plus_adr(top(), base, offset);
 
   // static final int[] byteTable in class CRC32C
   Node* table = get_table_from_crc32c_class(callee()->holder());
+  table = shenandoah_must_be_not_null(table, true);
+  table = access_resolve_for_read(table);
   Node* table_start = array_element_address(table, intcon(0), T_INT);
 
   // Call the stub.
   address stubAddr = StubRoutines::updateBytesCRC32C();
   const char *stubName = "updateBytesCRC32C";

@@ -5500,10 +5681,11 @@
   if (src_elem != T_BYTE) {
     return false;
   }
 
   // 'src_start' points to src array + scaled offset
+  src = access_resolve_for_read(src);
   Node* src_start = array_element_address(src, offset, src_elem);
 
   // We assume that range check is done by caller.
   // TODO: generate range check (offset+length < src.length) in debug VM.
 

@@ -5612,10 +5794,20 @@
   if (is_static) {
     const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
     fromObj = makecon(tip);
   }
 
+#if INCLUDE_SHENANDOAHGC
+  if ((ShenandoahOptimizeStaticFinals   && field->is_static()  && field->is_final()) ||
+      (ShenandoahOptimizeInstanceFinals && !field->is_static() && field->is_final()) ||
+      (ShenandoahOptimizeStableFinals   && field->is_stable())) {
+    // Skip the barrier for special fields
+  } else {
+    fromObj = access_resolve_for_read(fromObj);
+  }
+#endif
+
   // Next code  copied from Parse::do_get_xxx():
 
   // Compute address and memory type.
   int offset  = field->offset_in_bytes();
   bool is_vol = field->is_volatile();

@@ -5700,10 +5892,16 @@
   Node* src             = argument(1);
   Node* src_offset      = argument(2);
   Node* dest            = argument(3);
   Node* dest_offset     = argument(4);
 
+  // Resolve src and dest arrays for ShenandoahGC.
+  src = shenandoah_must_be_not_null(src, true);
+  src = access_resolve_for_read(src);
+  dest = shenandoah_must_be_not_null(dest, true);
+  dest = access_resolve_for_write(dest);
+
   // (1) src and dest are arrays.
   const Type* src_type = src->Value(&_gvn);
   const Type* dest_type = dest->Value(&_gvn);
   const TypeAryPtr* top_src = src_type->isa_aryptr();
   const TypeAryPtr* top_dest = dest_type->isa_aryptr();

@@ -5770,10 +5968,18 @@
   Node* src_offset                 = argument(2);
   Node* len                        = argument(3);
   Node* dest                       = argument(4);
   Node* dest_offset                = argument(5);
 
+  // inline_cipherBlockChaining_AESCrypt_predicate() has its own barriers,
+  // so the ones added here should optimize away.
+  src = shenandoah_must_be_not_null(src, false);
+  dest = shenandoah_must_be_not_null(dest, false);
+  src = access_resolve_for_read(src);
+  dest = access_resolve_for_write(dest);
+
   // (1) src and dest are arrays.
   const Type* src_type = src->Value(&_gvn);
   const Type* dest_type = dest->Value(&_gvn);
   const TypeAryPtr* top_src = src_type->isa_aryptr();
   const TypeAryPtr* top_dest = dest_type->isa_aryptr();

@@ -5814,10 +6020,13 @@
   Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
   if (k_start == NULL) return false;
 
   // similarly, get the start address of the r vector
   Node* objRvec = load_field_from_object(cipherBlockChaining_object, "r", "[B", /*is_exact*/ false);
   if (objRvec == NULL) return false;
+
+  objRvec = access_resolve_for_write(objRvec);
+
   Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE);
 
   Node* cbcCrypt;
   if (Matcher::pass_original_key_for_aes()) {

@@ -5871,10 +6080,14 @@
   const TypeAryPtr* top_src = src_type->isa_aryptr();
   const TypeAryPtr* top_dest = dest_type->isa_aryptr();
   assert(top_src != NULL && top_src->klass() != NULL &&
          top_dest != NULL && top_dest->klass() != NULL, "args are strange");
 
+  src = access_resolve_for_read(src);
+  dest = access_resolve_for_write(dest);
+  counterMode_object = access_resolve_for_write(counterMode_object);
+
   // checks are the responsibility of the caller
   Node* src_start = src;
   Node* dest_start = dest;
   if (src_offset != NULL || dest_offset != NULL) {
     assert(src_offset != NULL && dest_offset != NULL, "");

@@ -5903,14 +6116,16 @@
   Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
   if (k_start == NULL) return false;
   // similarly, get the start address of the r vector
   Node* obj_counter = load_field_from_object(counterMode_object, "counter", "[B", /*is_exact*/ false);
   if (obj_counter == NULL) return false;
+  obj_counter = access_resolve_for_write(obj_counter);
   Node* cnt_start = array_element_address(obj_counter, intcon(0), T_BYTE);
 
   Node* saved_encCounter = load_field_from_object(counterMode_object, "encryptedCounter", "[B", /*is_exact*/ false);
   if (saved_encCounter == NULL) return false;
+  saved_encCounter = access_resolve_for_write(saved_encCounter);
   Node* saved_encCounter_start = array_element_address(saved_encCounter, intcon(0), T_BYTE);
   Node* used = field_address_from_object(counterMode_object, "used", "I", /*is_exact*/ false);
 
   Node* ctrCrypt;
   if (Matcher::pass_original_key_for_aes()) {

@@ -5946,10 +6161,12 @@
   Node* objAESCryptKey = load_field_from_object(aescrypt_object, "K", "[I", /*is_exact*/ false);
 #endif // PPC64
   assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
   if (objAESCryptKey == NULL) return (Node *) NULL;
 
+  objAESCryptKey = access_resolve_for_read(objAESCryptKey);
+
   // now have the array, need to get the start address of the K array
   Node* k_start = array_element_address(objAESCryptKey, intcon(0), T_INT);
   return k_start;
 }
 

@@ -5957,10 +6174,12 @@
 Node * LibraryCallKit::get_original_key_start_from_aescrypt_object(Node *aescrypt_object) {
   Node* objAESCryptKey = load_field_from_object(aescrypt_object, "lastKey", "[B", /*is_exact*/ false);
   assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
   if (objAESCryptKey == NULL) return (Node *) NULL;
 
+  objAESCryptKey = access_resolve_for_read(objAESCryptKey);
+
   // now have the array, need to get the start address of the lastKey array
   Node* original_k_start = array_element_address(objAESCryptKey, intcon(0), T_BYTE);
   return original_k_start;
 }
 

@@ -5975,10 +6194,13 @@
 //
 Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting) {
   // The receiver was checked for NULL already.
   Node* objCBC = argument(0);
 
+  Node* src = argument(1);
+  Node* dest = argument(4);
+
   // Load embeddedCipher field of CipherBlockChaining object.
   Node* embeddedCipherObj = load_field_from_object(objCBC, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
 
   // get AESCrypt klass for instanceOf check
   // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point

@@ -5993,10 +6215,19 @@
     // if AESCrypt is not even loaded, we never take the intrinsic fast path
     Node* ctrl = control();
     set_control(top()); // no regular fast path
     return ctrl;
   }
+
+  // Resolve src and dest arrays for ShenandoahGC here, because the predicate
+  // logic in inline_cipherBlockChaining_AESCrypt itself does not handle the
+  // new memory state.
+  src = shenandoah_must_be_not_null(src, true);
+  dest = shenandoah_must_be_not_null(dest, true);
+  src = access_resolve_for_write(src);
+  dest = access_resolve_for_write(dest);
+
   ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
 
   Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
   Node* cmp_instof  = _gvn.transform(new CmpINode(instof, intcon(1)));
   Node* bool_instof  = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));

@@ -6010,12 +6241,11 @@
   // for decryption, we need to add a further check to avoid
   // taking the intrinsic path when cipher and plain are the same
   // see the original java code for why.
   RegionNode* region = new RegionNode(3);
   region->init_req(1, instof_false);
-  Node* src = argument(1);
-  Node* dest = argument(4);
+
   Node* cmp_src_dest = _gvn.transform(new CmpPNode(src, dest));
   Node* bool_src_dest = _gvn.transform(new BoolNode(cmp_src_dest, BoolTest::eq));
   Node* src_dest_conjoint = generate_guard(bool_src_dest, NULL, PROB_MIN);
   region->init_req(2, src_dest_conjoint);
 

@@ -6078,10 +6308,18 @@
   Node* offset         = argument(1);
   Node* len            = argument(2);
   Node* state          = argument(3);
   Node* subkeyH        = argument(4);
 
+  state = shenandoah_must_be_not_null(state, true);
+  subkeyH = shenandoah_must_be_not_null(subkeyH, true);
+  data = shenandoah_must_be_not_null(data, true);
+
+  state = access_resolve_for_write(state);
+  subkeyH = access_resolve_for_read(subkeyH);
+  data = access_resolve_for_read(data);
+
   Node* state_start  = array_element_address(state, intcon(0), T_LONG);
   assert(state_start, "state is NULL");
   Node* subkeyH_start  = array_element_address(subkeyH, intcon(0), T_LONG);
   assert(subkeyH_start, "subkeyH is NULL");
   Node* data_start  = array_element_address(data, offset, T_BYTE);

@@ -6109,10 +6347,15 @@
   Node* len = argument(3);
   Node* dest = argument(4);
   Node* dp = argument(5);
   Node* isURL = argument(6);
 
+  src = must_be_not_null(src, true);
+  src = access_resolve_for_read(src);
+  dest = must_be_not_null(dest, true);
+  dest = access_resolve_for_write(dest);
+
   Node* src_start = array_element_address(src, intcon(0), T_BYTE);
   assert(src_start, "source array is NULL");
   Node* dest_start = array_element_address(dest, intcon(0), T_BYTE);
   assert(dest_start, "destination array is NULL");
 

@@ -6151,10 +6394,12 @@
   BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
   if (src_elem != T_BYTE) {
     return false;
   }
   // 'src_start' points to src array + offset
+  src = shenandoah_must_be_not_null(src, true);
+  src = access_resolve_for_read(src);
   Node* src_start = array_element_address(src, ofs, src_elem);
   Node* state = NULL;
   address stubAddr;
   const char *stubName;
 

@@ -6220,10 +6465,12 @@
   BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
   if (src_elem != T_BYTE) {
     return false;
   }
   // 'src_start' points to src array + offset
+  src = shenandoah_must_be_not_null(src, false);
+  src = access_resolve_for_read(src);
   Node* src_start = array_element_address(src, ofs, src_elem);
 
   const char* klass_SHA_name = NULL;
   const char* stub_name = NULL;
   address     stub_addr = NULL;

@@ -6304,10 +6551,12 @@
 Node * LibraryCallKit::get_state_from_sha_object(Node *sha_object) {
   Node* sha_state = load_field_from_object(sha_object, "state", "[I", /*is_exact*/ false);
   assert (sha_state != NULL, "wrong version of sun.security.provider.SHA/SHA2");
   if (sha_state == NULL) return (Node *) NULL;
 
+  sha_state = access_resolve_for_write(sha_state);
+
   // now have the array, need to get the start address of the state array
   Node* state = array_element_address(sha_state, intcon(0), T_INT);
   return state;
 }
 

@@ -6315,10 +6564,12 @@
 Node * LibraryCallKit::get_state_from_sha5_object(Node *sha_object) {
   Node* sha_state = load_field_from_object(sha_object, "state", "[J", /*is_exact*/ false);
   assert (sha_state != NULL, "wrong version of sun.security.provider.SHA5");
   if (sha_state == NULL) return (Node *) NULL;
 
+  sha_state = access_resolve_for_write(sha_state);
+
   // now have the array, need to get the start address of the state array
   Node* state = array_element_address(sha_state, intcon(0), T_LONG);
   return state;
 }
 