< prev index next >

src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp

Print this page
@@ -31,10 +31,11 @@
  #include "gc/shenandoah/shenandoahRuntime.hpp"
  #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  #include "gc/shenandoah/c2/shenandoahSupport.hpp"
  #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
+ #include "gc/shenandoah/mode/shenandoahMode.hpp"
  #include "opto/arraycopynode.hpp"
  #include "opto/escape.hpp"
  #include "opto/graphKit.hpp"
  #include "opto/idealKit.hpp"
  #include "opto/macro.hpp"

@@ -239,11 +240,11 @@
  
    // Now some of the values
    Node* marking;
    Node* gc_state = __ AddP(no_base, tls, __ ConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset())));
    Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE, Compile::AliasIdxRaw);
-   marking = __ AndI(ld, __ ConI(ShenandoahHeap::MARKING));
+   marking = __ AndI(ld, __ ConI(ShenandoahHeap::YOUNG_MARKING | ShenandoahHeap::OLD_MARKING));
    assert(ShenandoahBarrierC2Support::is_gc_state_load(ld), "Should match the shape");
  
    // if (!marking)
    __ if_then(marking, BoolTest::ne, zero, unlikely); {
      BasicType index_bt = TypeX_X->basic_type();

@@ -320,11 +321,11 @@
    Node* cmpx = bol->in(1);
    if (bol->as_Bool()->_test._test == BoolTest::ne &&
        cmpx->is_Cmp() && cmpx->in(2) == phase->intcon(0) &&
        is_shenandoah_state_load(cmpx->in(1)->in(1)) &&
        cmpx->in(1)->in(2)->is_Con() &&
-       cmpx->in(1)->in(2) == phase->intcon(ShenandoahHeap::MARKING)) {
+       cmpx->in(1)->in(2) == phase->intcon(ShenandoahHeap::YOUNG_MARKING | ShenandoahHeap::OLD_MARKING)) {
      return true;
    }
  
    return false;
  }

@@ -449,10 +450,102 @@
  
    // Final sync IdealKit and GraphKit.
    kit->final_sync(ideal);
  }
  
+ // Materialize the card table's (biased) base address as a constant raw
+ // pointer node in the compiled graph. Used by post_barrier() below to
+ // compute card addresses. Returns kit->null() when byte_map_base() is
+ // NULL (e.g. card table not yet committed -- TODO confirm when this occurs).
+ Node* ShenandoahBarrierSetC2::byte_map_base_node(GraphKit* kit) const {
+   BarrierSet* bs = BarrierSet::barrier_set();
+   ShenandoahBarrierSet* ctbs = barrier_set_cast<ShenandoahBarrierSet>(bs);
+   CardTable::CardValue* card_table_base = ctbs->card_table()->byte_map_base();
+   if (card_table_base != NULL) {
+     return kit->makecon(TypeRawPtr::make((address)card_table_base));
+   } else {
+     return kit->null();
+   }
+ }
+ 
+ // Emit a card-marking ("post") write barrier for generational Shenandoah,
+ // following the shape of the classic CardTableBarrierSetC2::post_barrier.
+ // After an oop store, the card covering the written address is dirtied
+ // (presumably so remembered-set scanning can find cross-generation
+ // pointers -- the scanning side is not visible here). No-op when the heap
+ // mode is not generational.
+ //
+ //   obj/adr     - base object and exact address of the stored-to field
+ //   val         - value being stored; constant NULL stores need no barrier
+ //   use_precise - if false, mark the card of the object head instead of
+ //                 the exact field (sufficient for non-array instances)
+ //
+ // NOTE(review): ctl, oop_store, adr_idx and bt are accepted for signature
+ // parity with other post_barrier implementations but are unused below;
+ // likewise local 'ct' is computed but never read.
+ void ShenandoahBarrierSetC2::post_barrier(GraphKit* kit,
+                                           Node* ctl,
+                                           Node* oop_store,
+                                           Node* obj,
+                                           Node* adr,
+                                           uint  adr_idx,
+                                           Node* val,
+                                           BasicType bt,
+                                           bool use_precise) const {
+   // Card marks are only needed to track inter-generational pointers.
+   if (!ShenandoahHeap::heap()->mode()->is_generational()) {
+     return;
+   }
+ 
+   ShenandoahBarrierSet* ctbs = barrier_set_cast<ShenandoahBarrierSet>(BarrierSet::barrier_set());
+   CardTable* ct = ctbs->card_table();
+   // No store check needed if we're storing a NULL or an old object
+   // (latter case is probably a string constant). The concurrent
+   // mark sweep garbage collector, however, needs to have all nonNull
+   // oop updates flagged via card-marks.
+   // NOTE(review): comment above is inherited from CMS-era card-mark code.
+   if (val != NULL && val->is_Con()) {
+     // must be either an oop or NULL
+     const Type* t = val->bottom_type();
+     if (t == TypePtr::NULL_PTR || t == Type::TOP)
+       // stores of null never (?) need barriers
+       return;
+   }
+ 
+   if (ReduceInitialCardMarks && obj == kit->just_allocated_object(kit->control())) {
+     // We can skip marks on a freshly-allocated object in Eden.
+     // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
+     // That routine informs GC to take appropriate compensating steps,
+     // upon a slow-path allocation, so as to make this card-mark
+     // elision safe.
+     return;
+   }
+ 
+   if (!use_precise) {
+     // All card marks for a (non-array) instance are in one place:
+     adr = obj;
+   }
+   // (Else it's an array (or unknown), and we want more precise card marks.)
+   assert(adr != NULL, "");
+ 
+   IdealKit ideal(kit, true);
+ 
+   // Convert the pointer to an int prior to doing math on it
+   Node* cast = __ CastPX(__ ctrl(), adr);
+ 
+   // Divide by card size
+   Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift) );
+ 
+   // Combine card table base and card offset
+   Node* card_adr = __ AddP(__ top(), byte_map_base_node(kit), card_offset );
+ 
+   // Get the alias_index for raw card-mark memory
+   int adr_type = Compile::AliasIdxRaw;
+   Node*   zero = __ ConI(0); // Dirty card value
+ 
+   if (UseCondCardMark) {
+     // The classic GC reference write barrier is typically implemented
+     // as a store into the global card mark table.  Unfortunately
+     // unconditional stores can result in false sharing and excessive
+     // coherence traffic as well as false transactional aborts.
+     // UseCondCardMark enables MP "polite" conditional card mark
+     // stores.  In theory we could relax the load from ctrl() to
+     // no_ctrl, but that doesn't buy much latitude.
+     Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, T_BYTE, adr_type);
+     __ if_then(card_val, BoolTest::ne, zero);
+   }
+ 
+   // Smash zero into card
+   __ store(__ ctrl(), card_adr, zero, T_BYTE, adr_type, MemNode::unordered);
+ 
+   if (UseCondCardMark) {
+     __ end_if();
+   }
+ 
+   // Final sync IdealKit and GraphKit.
+   kit->final_sync(ideal);
+ }
+ 
  #undef __
  
  const TypeFunc* ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type() {
    const Type **fields = TypeTuple::fields(2);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value

@@ -515,22 +608,28 @@
      Node* value = val.node();
      value = shenandoah_iu_barrier(kit, value);
      val.set_node(value);
      shenandoah_write_barrier_pre(kit, true /* do_load */, /*kit->control(),*/ access.base(), adr, adr_idx, val.node(),
                                   static_cast<const TypeOopPtr*>(val.type()), NULL /* pre_val */, access.type());
+ 
+     Node* result = BarrierSetC2::store_at_resolved(access, val);
+     bool is_array = (decorators & IS_ARRAY) != 0;
+     bool use_precise = is_array || anonymous;
+     post_barrier(kit, kit->control(), access.raw_access(), access.base(), adr, adr_idx, val.node(), access.type(), use_precise);
+     return result;
    } else {
      assert(access.is_opt_access(), "only for optimization passes");
      assert(((decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0 || !ShenandoahSATBBarrier) && (decorators & C2_ARRAY_COPY) != 0, "unexpected caller of this code");
      C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
      PhaseGVN& gvn =  opt_access.gvn();
  
      if (ShenandoahIUBarrier) {
        Node* enqueue = gvn.transform(new ShenandoahIUBarrierNode(val.node()));
        val.set_node(enqueue);
      }
+     return BarrierSetC2::store_at_resolved(access, val);
    }
-   return BarrierSetC2::store_at_resolved(access, val);
  }
  
  Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
    // 1: non-reference load, no additional barrier is needed
    if (!access.is_oop()) {

@@ -597,11 +696,11 @@
  
    return load;
  }
  
  Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
-                                                    Node* new_val, const Type* value_type) const {
+                                                              Node* new_val, const Type* value_type) const {
    GraphKit* kit = access.kit();
    if (access.is_oop()) {
      new_val = shenandoah_iu_barrier(kit, new_val);
      shenandoah_write_barrier_pre(kit, false /* do_load */,
                                   NULL, NULL, max_juint, NULL, NULL,

@@ -639,10 +738,11 @@
      if (adr->bottom_type()->is_ptr_to_narrowoop()) {
        load_store = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
      }
  #endif
      load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, load_store, access.decorators()));
+     post_barrier(kit, kit->control(), access.raw_access(), access.base(), access.addr().node(), access.alias_idx(), new_val, T_OBJECT, true);
      return load_store;
    }
    return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
  }
  

@@ -694,10 +794,12 @@
          }
        }
      }
      access.set_raw_access(load_store);
      pin_atomic_op(access);
+     post_barrier(kit, kit->control(), access.raw_access(), access.base(),
+                  access.addr().node(), access.alias_idx(), new_val, T_OBJECT, true);
      return load_store;
    }
    return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
  }
  

@@ -710,10 +812,12 @@
    if (access.is_oop()) {
      result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, result, access.decorators()));
      shenandoah_write_barrier_pre(kit, false /* do_load */,
                                   NULL, NULL, max_juint, NULL, NULL,
                                   result /* pre_val */, T_OBJECT);
+     post_barrier(kit, kit->control(), access.raw_access(), access.base(),
+                  access.addr().node(), access.alias_idx(), val, T_OBJECT, true);
    }
    return result;
  }
  
  // Support for GC barriers emitted during parsing

@@ -792,11 +896,11 @@
            Compile::current()->dependencies()->assert_leaf_type(ik);
          }
        }
      } else {
        return true;
-         }
+     }
    } else if (src_type->isa_aryptr()) {
      BasicType src_elem  = src_type->klass()->as_array_klass()->element_type()->basic_type();
      if (is_reference_type(src_elem)) {
        return true;
      }

@@ -835,11 +939,11 @@
      debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));
  
      Node* gc_state    = phase->transform_later(new LoadBNode(ctrl, mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered));
      int flags = ShenandoahHeap::HAS_FORWARDED;
      if (ShenandoahIUBarrier) {
-       flags |= ShenandoahHeap::MARKING;
+       flags |= ShenandoahHeap::YOUNG_MARKING;
      }
      Node* stable_and  = phase->transform_later(new AndINode(gc_state, phase->igvn().intcon(flags)));
      Node* stable_cmp  = phase->transform_later(new CmpINode(stable_and, phase->igvn().zerocon(T_INT)));
      Node* stable_test = phase->transform_later(new BoolNode(stable_cmp, BoolTest::ne));
  

@@ -903,13 +1007,30 @@
    if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
      state()->remove_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
    }
  }
  
- void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* n) const {
-   if (is_shenandoah_wb_pre_call(n)) {
-     shenandoah_eliminate_wb_pre(n, &macro->igvn());
+ // Remove GC barrier code attached to a node that macro expansion has
+ // proven dead (e.g. barriers for a scalar-replaced allocation).
+ //   - SATB pre-barrier runtime calls are eliminated via
+ //     shenandoah_eliminate_wb_pre().
+ //   - In generational mode, a CastP2X node roots the card-mark address
+ //     computation emitted by post_barrier() above (CastP2X -> URShiftX
+ //     -> AddP -> store of the dirty value); walk that shape and unhook
+ //     every card-table access.
+ void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
+   if (is_shenandoah_wb_pre_call(node)) {
+     shenandoah_eliminate_wb_pre(node, &macro->igvn());
+   }
+   if (node->Opcode() == Op_CastP2X && ShenandoahHeap::heap()->mode()->is_generational()) {
+     // NOTE(review): this assert is redundant with the condition just above.
+     assert(node->Opcode() == Op_CastP2X, "ConvP2XNode required");
+      // Shape emitted by post_barrier(): shift = URShiftX(cast), addp =
+      // AddP(base, shift). Assumes each has a single use -- TODO confirm
+      // unique_out() cannot fail here.
+      Node *shift = node->unique_out();
+      Node *addp = shift->unique_out();
+      for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
+        Node *mem = addp->last_out(j);
+        if (UseCondCardMark && mem->is_Load()) {
+          assert(mem->Opcode() == Op_LoadB, "unexpected code shape");
+          // The load is checking if the card has been written so
+          // replace it with zero to fold the test.
+          macro->replace_node(mem, macro->intcon(0));
+          continue;
+        }
+        // Card-mark store: bypass it by wiring users to its input memory.
+        assert(mem->is_Store(), "store required");
+        macro->replace_node(mem, mem->in(MemNode::Memory));
+      }
    }
  }
  
  void ShenandoahBarrierSetC2::shenandoah_eliminate_wb_pre(Node* call, PhaseIterGVN* igvn) const {
    assert(UseShenandoahGC && is_shenandoah_wb_pre_call(call), "");
< prev index next >