< prev index next >

src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp

Print this page
*** 38,20 ***
  #endif
  
  class ShenandoahBarrierSetC1;
  class ShenandoahBarrierSetC2;
  
  // (old version) Constructor: wires the Shenandoah-specific assembler and the
  // C1/C2 compiler barrier adapters, the optional nmethod entry barrier, and
  // the SATB mark-queue allocator/set into the shared BarrierSet machinery.
! ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) :
    BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
               make_barrier_set_c1<ShenandoahBarrierSetC1>(),
               make_barrier_set_c2<ShenandoahBarrierSetC2>(),
               // nmethod barrier is only installed when the flag enables it
               ShenandoahNMethodBarrier ? new ShenandoahBarrierSetNMethod(heap) : NULL,
               BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet)),
    _heap(heap),
    _satb_mark_queue_buffer_allocator("SATB Buffer Allocator", ShenandoahSATBBufferSize),
    _satb_mark_queue_set(&_satb_mark_queue_buffer_allocator)
  {
  }
  
  ShenandoahBarrierSetAssembler* ShenandoahBarrierSet::assembler() {
    BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
    return reinterpret_cast<ShenandoahBarrierSetAssembler*>(bsa);
--- 38,24 ---
  #endif
  
  class ShenandoahBarrierSetC1;
  class ShenandoahBarrierSetC2;
  
  // Constructor. heap_region describes the reserved heap range; in generational
  // mode it is used to size and initialize the card table (see body below).
! ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap, MemRegion heap_region) :
    BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
               make_barrier_set_c1<ShenandoahBarrierSetC1>(),
               make_barrier_set_c2<ShenandoahBarrierSetC2>(),
               // nmethod barrier is only installed when the flag enables it
               ShenandoahNMethodBarrier ? new ShenandoahBarrierSetNMethod(heap) : NULL,
               BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet)),
    _heap(heap),
    _satb_mark_queue_buffer_allocator("SATB Buffer Allocator", ShenandoahSATBBufferSize),
    _satb_mark_queue_set(&_satb_mark_queue_buffer_allocator)
  {
+   // Generational mode card-marks old-gen writes: allocate and initialize a
+   // card table spanning the whole reserved heap range. Non-generational
+   // modes leave _card_table untouched.
+   if (heap->mode()->is_generational()) {
+     _card_table = new ShenandoahCardTable(heap_region);
+     _card_table->initialize();
+   }
  }
  
  ShenandoahBarrierSetAssembler* ShenandoahBarrierSet::assembler() {
    BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
    return reinterpret_cast<ShenandoahBarrierSetAssembler*>(bsa);

*** 116,10 ***
--- 120,18 ---
      PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
      if (gclab != NULL) {
        gclab->retire();
      }
  
+     PLAB* plab = ShenandoahThreadLocalData::plab(thread);
+     // CAUTION: retire_plab may register the remnant filler object with the remembered set scanner without a lock.
+     // This is safe iff it is assured that each PLAB is a whole-number multiple of card-mark memory size and each
+     // PLAB is aligned with the start of each card's memory range.
+     if (plab != NULL) {
+       _heap->retire_plab(plab);
+     }
+ 
    // SATB protocol requires keeping reachable oops from roots alive at the beginning of GC
      if (ShenandoahStackWatermarkBarrier) {
        if (_heap->is_concurrent_mark_in_progress()) {
          ShenandoahKeepAliveClosure oops;
          StackWatermarkSet::finish_processing(JavaThread::cast(thread), &oops, StackWatermarkKind::gc);

*** 134,5 ***
--- 146,26 ---
  // Conditional runtime entry to clone_barrier(): the barrier work is applied
  // only when it can matter — the heap currently has forwarded objects, or the
  // IU barrier is enabled while concurrent mark is in progress. Otherwise the
  // clone proceeds with no barrier action.
  void ShenandoahBarrierSet::clone_barrier_runtime(oop src) {
    if (_heap->has_forwarded_objects() || (ShenandoahIUBarrier && _heap->is_concurrent_mark_in_progress())) {
      clone_barrier(src);
    }
  }
+ 
+ // Post-write barrier for oop-array stores: dirties all remembered-set cards
+ // overlapping the written range [start, start + count oops). A no-op unless
+ // the heap runs in generational mode (only then does a card table exist).
+ void ShenandoahBarrierSet::write_ref_array(HeapWord* start, size_t count) {
+   if (!_heap->mode()->is_generational()) {
+     return;
+   }
+ 
+   // count is in oops; with compressed oops heapOopSize is narrower than a
+   // HeapWord, so compute the byte-accurate end first.
+   HeapWord* end = (HeapWord*)((char*) start + (count * heapOopSize));
+   // In the case of compressed oops, start and end may potentially be misaligned;
+   // so we need to conservatively align the first downward (this is not
+   // strictly necessary for current uses, but a case of good hygiene and,
+   // if you will, aesthetics) and the second upward (this is essential for
+   // current uses) to a HeapWord boundary, so we mark all cards overlapping
+   // this write.
+   HeapWord* aligned_start = align_down(start, HeapWordSize);
+   HeapWord* aligned_end   = align_up  (end,   HeapWordSize);
+   // If compressed oops were not being used, these should already be aligned
+   assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
+          "Expected heap word alignment of start and end");
+   _heap->card_scan()->mark_range_as_dirty(aligned_start, (aligned_end - aligned_start));
+ }
+ 
< prev index next >