< prev index next >

src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp

Print this page
@@ -1,7 +1,8 @@
  /*
   * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
+  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.

@@ -39,21 +40,25 @@
  #endif
  
  class ShenandoahBarrierSetC1;
  class ShenandoahBarrierSetC2;
  
- ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) :
+ ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap, MemRegion heap_region) :
  BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
             make_barrier_set_c1<ShenandoahBarrierSetC1>(),
             make_barrier_set_c2<ShenandoahBarrierSetC2>(),
             new ShenandoahBarrierSetNMethod(heap),
             new ShenandoahBarrierSetStackChunk(),
             BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet)),
  _heap(heap),
  _satb_mark_queue_buffer_allocator("SATB Buffer Allocator", ShenandoahSATBBufferSize),
  _satb_mark_queue_set(&_satb_mark_queue_buffer_allocator)
{
+   // When the card-marking barrier is enabled, allocate and initialize a card
+   // table covering heap_region. (NOTE(review): _card_table is presumably
+   // default-initialized to null in the class declaration when the barrier is
+   // off — confirm against the header.)
+   if (ShenandoahCardBarrier) {
+     _card_table = new ShenandoahCardTable(heap_region);
+     _card_table->initialize();
+   }
}
  
  ShenandoahBarrierSetAssembler* ShenandoahBarrierSet::assembler() {
    BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
    return reinterpret_cast<ShenandoahBarrierSetAssembler*>(bsa);

@@ -122,10 +127,19 @@
      PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
      if (gclab != nullptr) {
        gclab->retire();
      }
  
+     PLAB* plab = ShenandoahThreadLocalData::plab(thread);
+     // CAUTION: retire_plab may register the remnant filler object with the remembered set scanner without a lock.
+     // This is safe iff it is assured that each PLAB is a whole-number multiple of card-mark memory size and each
+     // PLAB is aligned with the start of each card's memory range.
+     // TODO: Assert this in retire_plab?
+     if (plab != nullptr) {
+       _heap->retire_plab(plab);
+     }
+ 
      // SATB protocol requires to keep alive reachable oops from roots at the beginning of GC
      if (ShenandoahStackWatermarkBarrier) {
        if (_heap->is_concurrent_mark_in_progress()) {
          ShenandoahKeepAliveClosure oops;
          StackWatermarkSet::finish_processing(JavaThread::cast(thread), &oops, StackWatermarkKind::gc);

@@ -140,5 +154,24 @@
  void ShenandoahBarrierSet::clone_barrier_runtime(oop src) {
    // Invoke the clone barrier only when it can matter: while the heap may
    // contain forwarded objects, or during concurrent marking when the
    // incremental-update (IU) barrier is enabled. Otherwise cloning src
    // needs no barrier work.
    if (_heap->has_forwarded_objects() || (ShenandoahIUBarrier && _heap->is_concurrent_mark_in_progress())) {
      clone_barrier(src);
    }
  }
+ 
+ // Bulk post-write barrier for oop-array stores: after 'count' heap oops have
+ // been written starting at 'start', mark every card overlapping the written
+ // range as dirty via the card scanner. Only valid when ShenandoahCardBarrier
+ // is enabled.
+ void ShenandoahBarrierSet::write_ref_array(HeapWord* start, size_t count) {
+   assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?");
+ 
+   HeapWord* end = (HeapWord*)((char*) start + (count * heapOopSize));
+   // In the case of compressed oops, start and end may potentially be misaligned;
+   // so we need to conservatively align the first downward (this is not
+   // strictly necessary for current uses, but a case of good hygiene and,
+   // if you will, aesthetics) and the second upward (this is essential for
+   // current uses) to a HeapWord boundary, so we mark all cards overlapping
+   // this write.
+   HeapWord* aligned_start = align_down(start, HeapWordSize);
+   HeapWord* aligned_end   = align_up  (end,   HeapWordSize);
+   // If compressed oops were not being used, these should already be aligned
+   assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
+          "Expected heap word alignment of start and end");
+   // Dirty the cards covering [aligned_start, aligned_end); length is in HeapWords.
+   _heap->card_scan()->mark_range_as_dirty(aligned_start, (aligned_end - aligned_start));
+ }
+ 
< prev index next >