src/share/vm/opto/parse3.cpp

@@ -34,10 +34,14 @@
 #include "opto/runtime.hpp"
 #include "opto/subnode.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/handles.inline.hpp"
 
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+#endif
+
 //=============================================================================
 // Helper methods for _get* and _put* bytecodes
 //=============================================================================
 bool Parse::static_field_ok_in_clinit(ciField *field, ciMethod *method) {
   // Could be the field_holder's <clinit> method, or <clinit> for a subklass.

@@ -194,10 +198,11 @@
         }
       }
     }
   }
 
+  Node* leading_membar = NULL;
   ciType* field_klass = field->type();
   bool is_vol = field->is_volatile();
 
   // Compute address and memory type.
   int offset = field->offset_in_bytes();

@@ -226,17 +231,24 @@
     }
   } else {
     type = Type::get_const_basic_type(bt);
   }
   if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
-    insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
+    leading_membar = insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
   }
   // Build the load.
   //
   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
   Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
 
+  Node* load = ld;
+#if INCLUDE_ALL_GCS
+  if (UseShenandoahGC && (bt == T_OBJECT || bt == T_ARRAY)) {
+    ld = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, ld);
+  }
+#endif
+
   // Adjust Java stack
   if (type2size[bt] == 1)
     push(ld);
   else
     push_pair(ld);

@@ -270,20 +282,25 @@
   // If reference is volatile, prevent following memory ops from
   // floating up past the volatile read.  Also prevents commoning
   // another volatile read.
   if (field->is_volatile()) {
     // Memory barrier includes bogus read of value to force load BEFORE membar
-    insert_mem_bar(Op_MemBarAcquire, ld);
+    assert(leading_membar == NULL || support_IRIW_for_not_multiple_copy_atomic_cpu, "no leading membar expected");
+    Node* mb = insert_mem_bar(Op_MemBarAcquire, load);
+    mb->as_MemBar()->set_trailing_load();
   }
 }
 
 void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
+  Node* leading_membar = NULL;
   bool is_vol = field->is_volatile();
   // If reference is volatile, prevent following memory ops from
   // floating down past the volatile write.  Also prevents commoning
   // another volatile read.
-  if (is_vol)  insert_mem_bar(Op_MemBarRelease);
+  if (is_vol) {
+    leading_membar = insert_mem_bar(Op_MemBarRelease);
+  }
 
   // Compute address and memory type.
   int offset = field->offset_in_bytes();
   const TypePtr* adr_type = C->alias_type(field)->adr_type();
   Node* adr = basic_plus_adr(obj, obj, offset);

@@ -320,11 +337,12 @@
   // If reference is volatile, prevent following volatiles ops from
   // floating up before the volatile write.
   if (is_vol) {
     // If not multiple copy atomic, we do the MemBarVolatile before the load.
     if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
-      insert_mem_bar(Op_MemBarVolatile); // Use fat membar
+      Node* mb = insert_mem_bar(Op_MemBarVolatile, store); // Use fat membar
+      MemBarNode::set_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());
     }
     // Remember we wrote a volatile field.
     // For not multiple copy atomic cpu (ppc64) a barrier should be issued
     // in constructors which have such stores. See do_exits() in parse1.cpp.
     if (is_field) {