
src/share/vm/opto/library_call.cpp

@@ -40,10 +40,15 @@
 #include "opto/runtime.hpp"
 #include "opto/subnode.hpp"
 #include "prims/nativeLookup.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/shenandoah/shenandoahRuntime.hpp"
+#include "gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp"
+#endif
 
 class LibraryIntrinsic : public InlineCallGenerator {
   // Extend the set of intrinsics known to the runtime:
  public:
  private:

@@ -2428,11 +2433,11 @@
   // is enabled, we need to log the value in the referent field in an SATB buffer.
   // This routine performs some compile time filters and generates suitable
   // runtime filters that guard the pre-barrier code.
   // Also add a memory barrier for non-volatile loads from the referent field
   // to prevent commoning of loads across safepoints.
-  if (!UseG1GC && !need_mem_bar)
+  if (!(UseG1GC || UseShenandoahGC) && !need_mem_bar)
     return;
 
   // Some compile time checks.
 
   // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
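
For orientation, here is a minimal plain-C++ model of the filtering described
above, which this hunk extends from G1 to Shenandoah. All names are
illustrative stand-ins, not the actual C2 code:

    // Compile-time filter mirrored from this hunk: with neither SATB
    // collector in use and no memory barrier required, the intrinsic
    // emits no pre-barrier code at all.
    bool may_need_referent_pre_barrier(bool use_g1, bool use_shenandoah,
                                       bool need_mem_bar) {
      return use_g1 || use_shenandoah || need_mem_bar;
    }

    // Runtime filter sketch: the generated code only logs the loaded
    // referent into the SATB buffer while concurrent marking is active
    // and the value is non-null.
    bool should_enqueue(bool satb_marking_active, const void* pre_val) {
      return satb_marking_active && pre_val != nullptr;
    }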

@@ -2685,10 +2690,18 @@
   // the barriers get omitted and the unsafe reference begins to "pollute"
   // the alias analysis of the rest of the graph, either Compile::can_alias
   // or Compile::must_alias will throw a diagnostic assert.)
   bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
 
+#if INCLUDE_ALL_GCS
+  // Work around the JDK-8220714 bug. This is done for Shenandoah only, until
+  // the shared code fix is upstreamed and properly tested there.
+  if (UseShenandoahGC) {
+    need_mem_bar |= is_native_ptr;
+  }
+#endif
+
   // If we are reading the value of the referent field of a Reference
   // object (either by using Unsafe directly or through reflection)
   // then, if G1 is enabled, we need to record the referent in an
   // SATB log buffer using the pre-barrier mechanism.
   // Also we need to add a memory barrier to prevent commoning reads
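
The Shenandoah-only workaround above widens need_mem_bar for raw native
pointers. A sketch of the resulting decision in plain C++ (illustrative
booleans, not the C2 types):

    // Accesses whose alias class is unknown (TypeOopPtr::BOTTOM) could
    // touch anything, so they must not be commoned or reordered freely.
    // Under Shenandoah, native-pointer Unsafe accesses are conservatively
    // fenced as well, per the JDK-8220714 workaround.
    bool compute_need_mem_bar(bool adr_type_is_bottom,
                              bool use_shenandoah, bool is_native_ptr) {
      bool need_mem_bar = adr_type_is_bottom;
      if (use_shenandoah) {
        need_mem_bar |= is_native_ptr;
      }
      return need_mem_bar;
    }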

@@ -2744,10 +2757,15 @@
   if (!is_store) {
     MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
     // To be valid, unsafe loads may depend on other conditions than
     // the one that guards them: pin the Load node
     load = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched);
+#if INCLUDE_ALL_GCS
+    if (UseShenandoahGC && (type == T_OBJECT || type == T_ARRAY)) {
+      load = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, load);
+    }
+#endif
     // load value
     switch (type) {
     case T_BOOLEAN:
     case T_CHAR:
     case T_BYTE:
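
A minimal self-contained model of what the load_reference_barrier call added
above does at runtime, under the assumed semantics that a loaded oop still
pointing at a from-space object must be replaced by its to-space copy. The
ObjHeader layout and flags are illustrative, not the real object format:

    #include <atomic>

    struct ObjHeader {
      std::atomic<ObjHeader*> fwd{nullptr};  // to-space copy, if evacuated
      bool in_cset = false;                  // object is being evacuated
    };

    static std::atomic<bool> g_evac_in_progress{false};  // GC phase flag

    ObjHeader* load_reference_barrier(ObjHeader* obj) {
      if (obj != nullptr &&
          g_evac_in_progress.load(std::memory_order_relaxed) &&
          obj->in_cset) {
        ObjHeader* copy = obj->fwd.load(std::memory_order_acquire);
        if (copy != nullptr) {
          return copy;  // use the to-space copy instead of the stale oop
        }
        // The real barrier can evacuate the object itself on this path;
        // that slow path is omitted from the model.
      }
      return obj;
    }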

@@ -2797,10 +2815,15 @@
     }
   }
 
   if (is_volatile) {
     if (!is_store) {
+#if INCLUDE_ALL_GCS
+      if (UseShenandoahGC) {
+        load = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(load);
+      }
+#endif
       Node* mb = insert_mem_bar(Op_MemBarAcquire, load);
       mb->as_MemBar()->set_trailing_load();
     } else {
       if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
         Node* mb = insert_mem_bar(Op_MemBarVolatile, store);
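
The trailing MemBarAcquire must pair with the node that actually reads memory,
and the hunks above now wrap object loads in a barrier node; hence the
step_over_gc_barrier call peels that wrapper off first. A sketch of the walk,
using an assumed node shape rather than the real C2 Node API:

    struct Node {
      bool  is_gc_barrier;   // true for GC barrier wrapper nodes
      Node* barrier_input;   // the wrapped value, when is_gc_barrier
    };

    // Return the raw load underneath any GC barrier nodes so the membar
    // is attached to it, keeping leading/trailing membar pairing intact.
    Node* step_over_gc_barrier(Node* n) {
      while (n != nullptr && n->is_gc_barrier) {
        n = n->barrier_input;
      }
      return n;
    }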

@@ -3109,10 +3132,15 @@
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
       load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
     }
 #endif
+#if INCLUDE_ALL_GCS
+    if (UseShenandoahGC) {
+      load_store = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, load_store);
+    }
+#endif
     if (can_move_pre_barrier()) {
       // Don't need to load pre_val. The old value is returned by load_store.
       // The pre_barrier can execute after the xchg as long as no safepoint
       // gets inserted between them.
       pre_barrier(false /* do_load */,
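
The same to-space rule applies to the previous value returned by an atomic
exchange: it is a reference freshly read from the heap. A sketch building on
the load_reference_barrier model above (assumed semantics):

    #include <atomic>

    struct ObjHeader;                                   // see earlier model
    ObjHeader* load_reference_barrier(ObjHeader* obj);  // see earlier model

    ObjHeader* barriered_exchange(std::atomic<ObjHeader*>& slot,
                                  ObjHeader* new_val) {
      ObjHeader* old_val = slot.exchange(new_val);
      return load_reference_barrier(old_val);  // canonicalize the old value
    }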

@@ -4544,10 +4572,24 @@
   // Compute the length also, if needed:
   Node* countx = size;
   countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(base_off)));
   countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong) ));
 
+#if INCLUDE_ALL_GCS
+  if (UseShenandoahGC && ShenandoahCloneBarrier) {
+    assert(src->is_AddP(), "for clone the src should be the interior ptr");
+    assert(dest->is_AddP(), "for clone the dst should be the interior ptr");
+
+    // Make sure that references in the cloned object are updated for Shenandoah.
+    make_runtime_call(RC_LEAF|RC_NO_FP,
+                      OptoRuntime::shenandoah_clone_barrier_Type(),
+                      CAST_FROM_FN_PTR(address, ShenandoahRuntime::shenandoah_clone_barrier),
+                      "shenandoah_clone_barrier", TypePtr::BOTTOM,
+                      src->in(AddPNode::Base));
+  }
+#endif
+
   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
   bool disjoint_bases = true;
   generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
                                src, NULL, dest, NULL, countx,
                                /*dest_uninitialized*/true);
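
The runtime call added above executes before the raw word-by-word copy below.
Conceptually it canonicalizes every reference inside the source object so the
bulk copy cannot propagate stale from-space pointers into the clone. A sketch
under that assumption, reusing the earlier model:

    #include <cstddef>

    struct ObjHeader;                                   // see earlier model
    ObjHeader* load_reference_barrier(ObjHeader* obj);  // see earlier model

    // Fix up the source object's reference fields so the raw jlong copy
    // that follows only ever duplicates to-space pointers.
    void clone_barrier_model(ObjHeader** ref_fields, std::size_t count) {
      for (std::size_t i = 0; i < count; i++) {
        ref_fields[i] = load_reference_barrier(ref_fields[i]);
      }
    }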

@@ -5280,11 +5322,11 @@
       checked_value   = cv;
     }
     // At this point we know we do not need type checks on oop stores.
 
     // Let's see if we need card marks:
-    if (alloc != NULL && use_ReduceInitialCardMarks()) {
+    if (alloc != NULL && use_ReduceInitialCardMarks() && !UseShenandoahGC) {
       // If we do not need card marks, copy using the jint or jlong stub.
       copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
       assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
              "sizes agree");
     }
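
The jint/jlong copy shortcut is only legal when the collector needs no per-oop
processing of the copied elements. Shenandoah has no card marks to elide, but
its barriers still require oop-aware copies, so the hunk above excludes it. As
an illustrative predicate in plain C++:

    bool can_copy_oops_as_raw_words(bool alloc_is_known,
                                    bool reduce_initial_card_marks,
                                    bool use_shenandoah) {
      return alloc_is_known && reduce_initial_card_marks && !use_shenandoah;
    }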

@@ -6314,10 +6356,16 @@
   const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
 
   Node* no_ctrl = NULL;
   Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT, MemNode::unordered);
 
+#if INCLUDE_ALL_GCS
+  if (UseShenandoahGC) {
+    result = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, result);
+  }
+#endif
+
   // Use the pre-barrier to record the value in the referent field
   pre_barrier(false /* do_load */,
               control(),
               NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
               result /* pre_val */,
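
This hunk chains two barriers on the loaded referent: the load reference
barrier canonicalizes it to to-space, then the SATB pre-barrier logs it as
pre_val so concurrent marking cannot lose it. A sketch building on the earlier
models, with satb_enqueue as an assumed stand-in for the SATB buffer hook:

    struct ObjHeader;                                   // see earlier model
    ObjHeader* load_reference_barrier(ObjHeader* obj);  // see earlier model
    void satb_enqueue(ObjHeader* pre_val);              // assumed SATB hook

    ObjHeader* reference_get_model(ObjHeader** referent_slot) {
      ObjHeader* result = load_reference_barrier(*referent_slot);
      if (result != nullptr) {
        satb_enqueue(result);  // keep the referent alive for the marker
      }
      return result;
    }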

@@ -6370,10 +6418,16 @@
     leading_membar = insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
   }
   // Build the load.
   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
   Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
+#if INCLUDE_ALL_GCS
+  if (UseShenandoahGC && (bt == T_OBJECT || bt == T_ARRAY)) {
+    loadedField = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, loadedField);
+  }
+#endif
+
   // If reference is volatile, prevent following memory ops from
   // floating up past the volatile read.  Also prevents commoning
   // another volatile read.
   if (is_vol) {
     // Memory barrier includes bogus read of value to force load BEFORE membar
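
For reference, the MemBarAcquire emitted on this path gives the volatile load
acquire semantics: later memory operations may not float above it. A plain C++
analogy:

    #include <atomic>

    int read_volatile_field(const std::atomic<int>& field) {
      // Acquire load: corresponds to the trailing MemBarAcquire in the
      // C2 graph for a volatile field read.
      return field.load(std::memory_order_acquire);
    }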