src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp

@@ -22,14 +22,17 @@
   *
   */
  
  #include "precompiled.hpp"
  #include "gc/shared/tlab_globals.hpp"
+ #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  #include "gc/shenandoah/shenandoahFreeSet.hpp"
  #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
+ #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
+ #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  #include "logging/logStream.hpp"
  #include "memory/resourceArea.hpp"
  #include "runtime/orderAccess.hpp"
  
  ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :

@@ -59,10 +62,27 @@
    assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
            idx, _max, _collector_leftmost, _collector_rightmost);
    return _collector_free_bitmap.at(idx);
  }
  
+ HeapWord* ShenandoahFreeSet::allocate_with_affiliation(ShenandoahRegionAffiliation affiliation, ShenandoahAllocRequest& req, bool& in_new_region) {
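+   // Reverse scan of the collector view: return the first successful allocation from a
+   // collector-free region whose affiliation matches the requested affiliation.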
+   for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
+     // size_t is unsigned, need to dodge underflow when _leftmost = 0
+     size_t idx = c - 1;
+     if (is_collector_free(idx)) {
+       ShenandoahHeapRegion* r = _heap->get_region(idx);
+       if (r->affiliation() == affiliation) {
+         HeapWord* result = try_allocate_in(r, req, in_new_region);
+         if (result != NULL) {
+           return result;
+         }
+       }
+     }
+   }
+   return NULL;
+ }
+ 
  HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
    // Scan the bitmap looking for a first fit.
    //
    // Leftmost and rightmost bounds provide enough caching to walk bitmap efficiently. Normally,
    // we would find the region to allocate at right away.

@@ -90,30 +110,29 @@
  
        // There is no recovery. Mutator does not touch collector view at all.
        break;
      }
      case ShenandoahAllocRequest::_alloc_gclab:
+     case ShenandoahAllocRequest::_alloc_plab:
      case ShenandoahAllocRequest::_alloc_shared_gc: {
-       // size_t is unsigned, need to dodge underflow when _leftmost = 0
- 
-       // Fast-path: try to allocate in the collector view first
-       for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
-         size_t idx = c - 1;
-         if (is_collector_free(idx)) {
-           HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
-           if (result != NULL) {
-             return result;
-           }
-         }
+       // First try to fit into a region that is already in use in the same generation.
+       HeapWord* result = allocate_with_affiliation(req.affiliation(), req, in_new_region);
+       if (result != NULL) {
+         return result;
+       }
+       // Then try a free region that is dedicated to GC allocations.
+       result = allocate_with_affiliation(FREE, req, in_new_region);
+       if (result != NULL) {
+         return result;
        }
  
        // No dice. Can we borrow space from mutator view?
        if (!ShenandoahEvacReserveOverflow) {
          return NULL;
        }
  
-       // Try to steal the empty region from the mutator view
+       // Try to steal an empty region from the mutator view.
        for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
          size_t idx = c - 1;
          if (is_mutator_free(idx)) {
            ShenandoahHeapRegion* r = _heap->get_region(idx);
            if (can_allocate_from(r)) {

@@ -127,17 +146,15 @@
        }
  
        // No dice. Do not try to mix mutator and GC allocations, because
        // URWM moves due to GC allocations would expose unparsable mutator
        // allocations.
- 
        break;
      }
      default:
        ShouldNotReachHere();
    }
- 
    return NULL;
  }
  
  HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
    assert (!has_no_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());

@@ -147,39 +164,120 @@
      return NULL;
    }
  
    try_recycle_trashed(r);
  
+   if (r->affiliation() == ShenandoahRegionAffiliation::FREE) {
+     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
+ 
+     r->set_affiliation(req.affiliation());
+     r->set_update_watermark(r->bottom());
+ 
+     // Any OLD region allocated during concurrent coalesce-and-fill does not need to be coalesced and filled,
+     // because all objects allocated within this region are above TAMS (and thus are implicitly marked).  If this is
+     // an OLD region and concurrent preparation for mixed evacuations visits it before the start of the next old-gen
+     // concurrent mark (i.e. the region is allocated after old-gen concurrent mark has started but before concurrent
+     // preparations for mixed evacuations have completed), we mark the region as not requiring any coalesce-and-fill
+     // processing.  This is only necessary when req.affiliation() is OLD, but is harmless otherwise.
+     r->end_preemptible_coalesce_and_fill();
+     ctx->capture_top_at_mark_start(r);
+ 
+     assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
+     assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");
+ 
+     // Leave top_bitmap alone.  The first time a heap region is put into service, top_bitmap should equal end.
+     // Thereafter, it should represent the upper bound on parts of the bitmap that need to be cleared.
+     log_debug(gc)("NOT clearing bitmap for region " SIZE_FORMAT ", top_bitmap: "
+                   PTR_FORMAT " at transition from FREE to %s",
+                   r->index(), p2i(ctx->top_bitmap(r)), affiliation_name(req.affiliation()));
+   } else if (r->affiliation() != req.affiliation()) {
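+     // The region is already affiliated with a different generation: never mix allocations
+     // from different generations within a single region.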
+     return NULL;
+   }
+ 
    in_new_region = r->is_empty();
  
    HeapWord* result = NULL;
    size_t size = req.size();
  
+   // req.size() is in words, free() is in bytes.
    if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
-     size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
-     if (size > free) {
-       size = free;
+     if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
+       // Need to ensure that PLABs are aligned on a multiple of the card size.
+       size_t free = r->free();
+       size_t usable_free = (free / CardTable::card_size) << CardTable::card_shift;
+       free /= HeapWordSize;
+       usable_free /= HeapWordSize;
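+       // Worked example (assuming the default 512-byte card size and 8-byte heap words): if free is 5000 bytes,
+       // then usable_free is (5000 / 512) * 512 == 4608 bytes == 576 words; the leftover 392 bytes that cannot
+       // hold a card-aligned PLAB are accounted as padding below when the allocation succeeds.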
+       if (size > usable_free) {
+         size = usable_free;
+       }
+       if (size >= req.min_size()) {
+         result = r->allocate_aligned(size, req, CardTable::card_size);
+         if (result != nullptr && free > usable_free) {
+           // Account for the alignment padding
+           size_t padding = (free - usable_free) * HeapWordSize;
+           increase_used(padding);
+           assert(r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION, "All PLABs reside in old-gen");
+           _heap->old_generation()->increase_used(padding);
+           // For verification consistency, we need to report this padding to _heap
+           _heap->increase_used(padding);
+         }
+       }
+     } else {
+       size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
+       if (size > free) {
+         size = free;
+       }
+       if (size >= req.min_size()) {
+         result = r->allocate(size, req);
+         assert (result != NULL, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
+       }
      }
-     if (size >= req.min_size()) {
-       result = r->allocate(size, req.type());
-       assert (result != NULL, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
+   } else if (req.is_lab_alloc() && req.type() == ShenandoahAllocRequest::_alloc_plab) {
+     size_t free = r->free();
+     size_t usable_free = (free / CardTable::card_size) << CardTable::card_shift;
+     free /= HeapWordSize;
+     usable_free /= HeapWordSize;
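+     // Without ShenandoahElasticTLAB, the requested PLAB size cannot be shrunk: allocate only if
+     // the full (card-size-aligned) request fits within the card-aligned usable free space.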
+     if (size <= usable_free) {
+       assert(size % CardTable::card_size_in_words == 0, "PLAB size must be multiple of remembered set card size");
+ 
+       result = r->allocate_aligned(size, req, CardTable::card_size);
+       if (result != nullptr) {
+         // Account for the alignment padding
+         size_t padding = (free - usable_free) * HeapWordSize;
+         increase_used(padding);
+         assert(r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION, "All PLABs reside in old-gen");
+         _heap->old_generation()->increase_used(padding);
+         // For verification consistency, we need to report this padding to _heap
+         _heap->increase_used(padding);
+       }
      }
    } else {
-     result = r->allocate(size, req.type());
+     result = r->allocate(size, req);
    }
  
    if (result != NULL) {
+     // Record actual allocation size
+     req.set_actual_size(size);
+ 
      // Allocation successful, bump stats:
      if (req.is_mutator_alloc()) {
        increase_used(size * HeapWordSize);
+     } else if (req.is_gc_alloc()) {
+       // For GC allocations, we advance update_watermark because the objects relocated into this memory during
+       // evacuation are not updated during evacuation.  For both young and old regions r, it is essential that all
+       // PLABs be made parsable at the end of evacuation.  This is enabled by retiring all PLABs at the end of evacuation.
+       // TODO: Making a PLAB parsable involves placing a filler object in its remnant memory but does not require
+       // that the PLAB be disabled for all future purposes.  We may want to introduce a new service to make the
+       // PLABs parsable while still allowing the PLAB to serve future allocation requests that arise during the
+       // next evacuation pass.
+       r->set_update_watermark(r->top());
      }
  
-     // Record actual allocation size
-     req.set_actual_size(size);
- 
-     if (req.is_gc_alloc()) {
-       r->set_update_watermark(r->top());
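+     // Charge the allocation to the generation that matches the region's affiliation.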
+     if (r->affiliation() == ShenandoahRegionAffiliation::YOUNG_GENERATION) {
+       _heap->young_generation()->increase_used(size * HeapWordSize);
+     } else if (r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION) {
+       _heap->old_generation()->increase_used(size * HeapWordSize);
      }
    }
  
    if (result == NULL || has_no_alloc_capacity(r)) {
      // Region cannot afford this or future allocations. Retire it.

@@ -192,10 +290,11 @@
      // Record the remainder as allocation waste
      if (req.is_mutator_alloc()) {
        size_t waste = r->free();
        if (waste > 0) {
          increase_used(waste);
+         _heap->generation_for(req.affiliation())->increase_allocated(waste);
          _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
        }
      }
  
      size_t num = r->index();

@@ -280,10 +379,11 @@
  
      end++;
    };
  
    size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
+   ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
  
    // Initialize regions:
    for (size_t i = beg; i <= end; i++) {
      ShenandoahHeapRegion* r = _heap->get_region(i);
      try_recycle_trashed(r);

@@ -303,22 +403,43 @@
        used_words = remainder;
      } else {
        used_words = ShenandoahHeapRegion::region_size_words();
      }
  
-     r->set_top(r->bottom() + used_words);
+     r->set_affiliation(req.affiliation());
+     r->set_update_watermark(r->bottom());
+     r->set_top(r->bottom());    // Set top to bottom so we can capture TAMS
+     ctx->capture_top_at_mark_start(r);
+     r->set_top(r->bottom() + used_words); // Then change top to reflect allocation of humongous object.
+     assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
+     assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");
+ 
+     // Leave top_bitmap alone.  The first time a heap region is put into service, top_bitmap should equal end.
+     // Thereafter, it should represent the upper bound on parts of the bitmap that need to be cleared.
+     // ctx->clear_bitmap(r);
+     log_debug(gc)("NOT clearing bitmap for Humongous region [" PTR_FORMAT ", " PTR_FORMAT "], top_bitmap: "
+                   PTR_FORMAT " at transition from FREE to %s",
+                   p2i(r->bottom()), p2i(r->end()), p2i(ctx->top_bitmap(r)), affiliation_name(req.affiliation()));
  
      _mutator_free_bitmap.clear_bit(r->index());
    }
  
    // While individual regions report their true use, all humongous regions are
    // marked used in the free set.
    increase_used(ShenandoahHeapRegion::region_size_bytes() * num);
  
+   if (req.affiliation() == ShenandoahRegionAffiliation::YOUNG_GENERATION) {
+     _heap->young_generation()->increase_used(words_size * HeapWordSize);
+   } else if (req.affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION) {
+     _heap->old_generation()->increase_used(words_size * HeapWordSize);
+   }
+ 
    if (remainder != 0) {
      // Record this remainder as allocation waste
-     _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
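+     // Note: notify_mutator_alloc_words() takes a word count, while increase_allocated() takes bytes
+     // (hence the HeapWordSize scaling below).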
+     size_t waste = ShenandoahHeapRegion::region_size_words() - remainder;
+     _heap->notify_mutator_alloc_words(waste, true);
+     _heap->generation_for(req.affiliation())->increase_allocated(waste * HeapWordSize);
    }
  
    // Allocated at left/rightmost? Move the bounds appropriately.
    if (beg == _mutator_leftmost || end == _mutator_rightmost) {
      adjust_bounds();

@@ -382,10 +503,14 @@
  
    if (touches_bounds(idx)) {
      adjust_bounds();
    }
    assert_bounds();
+ 
+   // We do not ensure that the region is no longer trash,
+   // relying on try_allocate_in(), which always comes next,
+   // to recycle trash before attempting to allocate anything in the region.
  }
  
  void ShenandoahFreeSet::clear() {
    shenandoah_assert_heaplocked();
    clear_internal();

@@ -404,10 +529,11 @@
  
  void ShenandoahFreeSet::rebuild() {
    shenandoah_assert_heaplocked();
    clear();
  
+   log_debug(gc)("Rebuilding FreeSet");
    for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
      ShenandoahHeapRegion* region = _heap->get_region(idx);
      if (region->is_alloc_allowed() || region->is_trash()) {
        assert(!region->is_cset(), "Shouldn't be adding those to the free set");
  

@@ -417,10 +543,12 @@
        _capacity += alloc_capacity(region);
        assert(_used <= _capacity, "must not use more than we have");
  
        assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
        _mutator_free_bitmap.set_bit(idx);
+ 
+       log_debug(gc)("  Setting Region " SIZE_FORMAT " _mutator_free_bitmap bit to true", idx);
      }
    }
  
    // Evac reserve: reserve trailing space for evacuations
    size_t to_reserve = _heap->max_capacity() / 100 * ShenandoahEvacReserve;

@@ -434,10 +562,11 @@
        _mutator_free_bitmap.clear_bit(idx);
        _collector_free_bitmap.set_bit(idx);
        size_t ac = alloc_capacity(region);
        _capacity -= ac;
        reserved += ac;
+       log_debug(gc)("  Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
      }
    }
  
    recompute_bounds();
    assert_bounds();

@@ -542,10 +671,11 @@
      switch (req.type()) {
        case ShenandoahAllocRequest::_alloc_shared:
        case ShenandoahAllocRequest::_alloc_shared_gc:
          in_new_region = true;
          return allocate_contiguous(req);
+       case ShenandoahAllocRequest::_alloc_plab:
        case ShenandoahAllocRequest::_alloc_gclab:
        case ShenandoahAllocRequest::_alloc_tlab:
          in_new_region = false;
          assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
                 req.size(), ShenandoahHeapRegion::humongous_threshold_words());