src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
@@ -22,14 +22,18 @@
   *
   */
  
  #include "precompiled.hpp"
  #include "gc/shared/tlab_globals.hpp"
+ #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  #include "gc/shenandoah/shenandoahFreeSet.hpp"
  #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
+ #include "gc/shenandoah/shenandoahOldGeneration.hpp"
+ #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
+ #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  #include "logging/logStream.hpp"
  #include "memory/resourceArea.hpp"
  #include "runtime/orderAccess.hpp"
  
  ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :

@@ -59,10 +63,54 @@
    assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
            idx, _max, _collector_leftmost, _collector_rightmost);
    return _collector_free_bitmap.at(idx);
  }
  
+ // This is a temporary solution to work around a shortcoming of the existing free set implementation.
+ // TODO:
+ //   Remove this function after restructuring the FreeSet representation.  A problem in the existing implementation is that old-gen
+ //   regions are not considered to reside within the is_collector_free range.
+ //
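+ // In the interim, this helper scans right-to-left across the union of the mutator and collector index ranges,
+ // looking for a non-humongous, non-cset OLD region that still has allocation capacity.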
+ HeapWord* ShenandoahFreeSet::allocate_with_old_affiliation(ShenandoahAllocRequest& req, bool& in_new_region) {
+   ShenandoahRegionAffiliation affiliation = ShenandoahRegionAffiliation::OLD_GENERATION;
+ 
+   size_t rightmost = MAX2(_collector_rightmost, _mutator_rightmost);
+   size_t leftmost = MIN2(_collector_leftmost, _mutator_leftmost);
+ 
+   for (size_t c = rightmost + 1; c > leftmost; c--) {
+     // size_t is unsigned, need to dodge underflow when leftmost == 0
+     size_t idx = c - 1;
+     ShenandoahHeapRegion* r = _heap->get_region(idx);
+     if (r->affiliation() == affiliation && !r->is_humongous()) {
+       if (!r->is_cset() && !has_no_alloc_capacity(r)) {
+         HeapWord* result = try_allocate_in(r, req, in_new_region);
+         if (result != NULL) {
+           return result;
+         }
+       }
+     }
+   }
+   return nullptr;
+ }
+ 
+ HeapWord* ShenandoahFreeSet::allocate_with_affiliation(ShenandoahRegionAffiliation affiliation, ShenandoahAllocRequest& req, bool& in_new_region) {
+   for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
+     // size_t is unsigned, need to dodge underflow when _leftmost = 0
+     size_t idx = c - 1;
+     if (is_collector_free(idx)) {
+       ShenandoahHeapRegion* r = _heap->get_region(idx);
+       if (r->affiliation() == affiliation) {
+         HeapWord* result = try_allocate_in(r, req, in_new_region);
+         if (result != NULL) {
+           return result;
+         }
+       }
+     }
+   }
+   return NULL;
+ }
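+ 
+ // Note that both affiliation-aware helpers above scan from the right end of their index ranges toward the left,
+ // while the mutator path in allocate_single() scans left to right.  This tends to keep GC-side allocations and
+ // mutator allocations at opposite ends of the heap.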
+ 
  HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
    // Scan the bitmap looking for a first fit.
    //
    // Leftmost and rightmost bounds provide enough caching to walk bitmap efficiently. Normally,
    // we would find the region to allocate at right away.

@@ -72,130 +120,304 @@
    // of regions from the beginning most of the time.
    //
    // Free set maintains mutator and collector views, and normally they allocate in their views only,
  // unless we special-case stealing and mixed allocations.
  
+   // Overwrite with non-zero (non-NULL) values only if necessary for allocation bookkeeping.
+ 
+   bool allow_new_region = true;
+   switch (req.affiliation()) {
+     case ShenandoahRegionAffiliation::OLD_GENERATION:
+       // Note: unsigned result from adjusted_unaffiliated_regions() will never be less than zero, but it may equal zero.
+       if (_heap->old_generation()->adjusted_unaffiliated_regions() <= 0) {
+         allow_new_region = false;
+       }
+       break;
+ 
+     case ShenandoahRegionAffiliation::YOUNG_GENERATION:
+       // Note: unsigned result from adjusted_unaffiliated_regions() will never be less than zero, but it may equal zero.
+       if (_heap->young_generation()->adjusted_unaffiliated_regions() <= 0) {
+         allow_new_region = false;
+       }
+       break;
+ 
+     case ShenandoahRegionAffiliation::FREE:
+     default:
+       ShouldNotReachHere();
+       break;
+   }
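+ 
+   // At this point, allow_new_region is false only when the requesting generation has no unaffiliated regions left
+   // in its adjusted budget; such a request may still be satisfied from a region already affiliated with that generation.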
+ 
    switch (req.type()) {
      case ShenandoahAllocRequest::_alloc_tlab:
      case ShenandoahAllocRequest::_alloc_shared: {
- 
        // Try to allocate in the mutator view
        for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
-         if (is_mutator_free(idx)) {
-           HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
+         ShenandoahHeapRegion* r = _heap->get_region(idx);
+         if (is_mutator_free(idx) && (allow_new_region || r->affiliation() != ShenandoahRegionAffiliation::FREE)) {
+           // try_allocate_in() increases used if the allocation is successful.
+           HeapWord* result = try_allocate_in(r, req, in_new_region);
            if (result != NULL) {
              return result;
            }
          }
        }
- 
        // There is no recovery. Mutator does not touch collector view at all.
        break;
      }
      case ShenandoahAllocRequest::_alloc_gclab:
-     case ShenandoahAllocRequest::_alloc_shared_gc: {
-       // size_t is unsigned, need to dodge underflow when _leftmost = 0
+       // GCLABs are for evacuation, so we must be in the evacuation phase.  If this allocation is successful, increment
+       // the relevant evac_expended value rather than the used value.
  
-       // Fast-path: try to allocate in the collector view first
-       for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
-         size_t idx = c - 1;
-         if (is_collector_free(idx)) {
-           HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
-           if (result != NULL) {
-             return result;
-           }
+     case ShenandoahAllocRequest::_alloc_plab:
+       // PLABs always reside in old-gen and are only allocated during evacuation phase.
+ 
+     case ShenandoahAllocRequest::_alloc_shared_gc: {
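+       // The overall search order for GC allocations is: (1) a region already affiliated with the requesting
+       // generation, (2) if a new region is permitted, a FREE region from the collector view, and (3) if
+       // ShenandoahEvacReserveOverflow allows and a new region is permitted, an empty region stolen from the
+       // mutator view.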
+       // First try to fit into a region that is already in use in the same generation.
+       HeapWord* result;
+       if (req.affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION) {
+         // TODO: this is a workaround to address a deficiency in the FreeSet representation.  A better solution would fix
+         // the FreeSet implementation so that old-gen regions are treated as part of the "collector free set".
+         result = allocate_with_old_affiliation(req, in_new_region);
+       } else {
+         result = allocate_with_affiliation(req.affiliation(), req, in_new_region);
+       }
+       if (result != NULL) {
+         return result;
+       }
+       if (allow_new_region) {
+         // Then try a free region that is dedicated to GC allocations.
+         result = allocate_with_affiliation(FREE, req, in_new_region);
+         if (result != NULL) {
+           return result;
          }
        }
  
        // No dice. Can we borrow space from mutator view?
        if (!ShenandoahEvacReserveOverflow) {
          return NULL;
        }
  
-       // Try to steal the empty region from the mutator view
-       for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
-         size_t idx = c - 1;
-         if (is_mutator_free(idx)) {
-           ShenandoahHeapRegion* r = _heap->get_region(idx);
-           if (can_allocate_from(r)) {
-             flip_to_gc(r);
-             HeapWord *result = try_allocate_in(r, req, in_new_region);
-             if (result != NULL) {
-               return result;
+       if (allow_new_region) {
+         // Try to steal an empty region from the mutator view.
+         for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
+           size_t idx = c - 1;
+           if (is_mutator_free(idx)) {
+             ShenandoahHeapRegion* r = _heap->get_region(idx);
+             if (can_allocate_from(r)) {
+               flip_to_gc(r);
+               HeapWord *result = try_allocate_in(r, req, in_new_region);
+               if (result != NULL) {
+                 return result;
+               }
              }
            }
          }
        }
  
        // No dice. Do not try to mix mutator and GC allocations, because
        // URWM moves due to GC allocations would expose unparsable mutator
        // allocations.
- 
        break;
      }
      default:
        ShouldNotReachHere();
    }
- 
    return NULL;
  }
  
  HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
    assert (!has_no_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
  
    if (_heap->is_concurrent_weak_root_in_progress() &&
        r->is_trash()) {
      return NULL;
    }
- 
    try_recycle_trashed(r);
+   if (r->affiliation() == ShenandoahRegionAffiliation::FREE) {
+     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
+     r->set_affiliation(req.affiliation());
+     if (r->is_old()) {
+       // Any OLD region allocated during concurrent coalesce-and-fill does not need to be coalesced and filled because
+       // all objects allocated within this region are above TAMS (and thus are implicitly marked).  In case this is an
+       // OLD region and concurrent preparation for mixed evacuations visits this region before the start of the next
+       // old-gen concurrent mark (i.e. this region is allocated following the start of old-gen concurrent mark but before
+       // concurrent preparations for mixed evacuations are completed), we mark this region as not requiring any
+       // coalesce-and-fill processing.
+       r->end_preemptible_coalesce_and_fill();
+       _heap->clear_cards_for(r);
+     }
  
-   in_new_region = r->is_empty();
+     assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
+     assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");
+ 
+     // Leave top_bitmap alone.  The first time a heap region is put into service, top_bitmap should equal end.
+     // Thereafter, it should represent the upper bound on parts of the bitmap that need to be cleared.
+     log_debug(gc)("NOT clearing bitmap for region " SIZE_FORMAT ", top_bitmap: "
+                   PTR_FORMAT " at transition from FREE to %s",
+                   r->index(), p2i(ctx->top_bitmap(r)), affiliation_name(req.affiliation()));
+   } else if (r->affiliation() != req.affiliation()) {
+     return NULL;
+   }
  
+   in_new_region = r->is_empty();
    HeapWord* result = NULL;
    size_t size = req.size();
  
+   // req.size() is in words, r->free() is in bytes.
    if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
-     size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
-     if (size > free) {
-       size = free;
+     if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
+       // Need to ensure that PLABs are aligned on a multiple of the card size.
+       size_t free = r->free();
+       size_t usable_free = (free / CardTable::card_size()) << CardTable::card_shift();
+       if ((free != usable_free) && (free - usable_free < ShenandoahHeap::min_fill_size() * HeapWordSize)) {
+         // We'll have to add another card's memory to the padding
+         if (usable_free > CardTable::card_size()) {
+           usable_free -= CardTable::card_size();
+         } else {
+           assert(usable_free == 0, "usable_free is a multiple of card_size and card_size > min_fill_size");
+         }
+       }
+       free /= HeapWordSize;
+       usable_free /= HeapWordSize;
+       size_t remnant = size % CardTable::card_size_in_words();
+       if (remnant > 0) {
+         // Since we have Elastic TLABs, align size up.  This is consistent with aligning min_size up.
+         size = size - remnant + CardTable::card_size_in_words();
+       }
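+       // For example (assuming the default 512-byte cards and 8-byte HeapWords, so card_size_in_words() == 64),
+       // a request of size == 200 words has remnant == 8 and is rounded up to 256 words, a whole number of cards.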
+       if (size > usable_free) {
+         size = usable_free;
+         assert(size % CardTable::card_size_in_words() == 0, "usable_free is a multiple of card table size");
+       }
+ 
+       size_t adjusted_min_size = req.min_size();
+       remnant = adjusted_min_size % CardTable::card_size_in_words();
+       if (remnant > 0) {
+         // Round up adjusted_min_size to a multiple of alignment size
+         adjusted_min_size = adjusted_min_size - remnant + CardTable::card_size_in_words();
+       }
+       if (size >= adjusted_min_size) {
+         result = r->allocate_aligned(size, req, CardTable::card_size());
+         assert(result != nullptr, "Allocation cannot fail");
+         size = req.actual_size();
+         assert(r->top() <= r->end(), "Allocation cannot span end of region");
+         // allocate_aligned() has already recorded the actual (card-aligned) allocation size in req.
+         assert((result == nullptr) || (size % CardTable::card_size_in_words() == 0),
+                "PLAB size must be multiple of card size");
+         assert((result == nullptr) || (((uintptr_t) result) % CardTable::card_size_in_words() == 0),
+                "PLAB start must align with card boundary");
+         if (free > usable_free) {
+           // Account for the alignment padding
+           size_t padding = (free - usable_free) * HeapWordSize;
+           increase_used(padding);
+           assert(r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION, "All PLABs reside in old-gen");
+           _heap->old_generation()->increase_used(padding);
+           // For verification consistency, we need to report this padding to _heap
+           _heap->increase_used(padding);
+         }
+       }
+       // Otherwise, leave result == NULL because the adjusted size is smaller than min size.
+     } else {
+       // This is a GCLAB or a TLAB allocation
+       size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
+       if (size > free) {
+         size = free;
+       }
+       if (size >= req.min_size()) {
+         result = r->allocate(size, req);
+         if (result != nullptr) {
+           // Record actual allocation size
+           req.set_actual_size(size);
+         }
+         assert (result != NULL, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
+       } else {
+         log_trace(gc, ergo)("Failed to shrink TLAB or GCLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
+                            " because min_size() is " SIZE_FORMAT, req.size(), r->index(), size, req.min_size());
+       }
      }
-     if (size >= req.min_size()) {
-       result = r->allocate(size, req.type());
-       assert (result != NULL, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
+   } else if (req.is_lab_alloc() && req.type() == ShenandoahAllocRequest::_alloc_plab) {
+     // inelastic PLAB
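+     // Unlike the elastic path above, the requested size is not adjusted here: it must already be a multiple of
+     // the card size and must fit entirely within the card-aligned usable free space, or the allocation fails.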
+     size_t free = r->free();
+     size_t usable_free = (free / CardTable::card_size()) << CardTable::card_shift();
+     free /= HeapWordSize;
+     usable_free /= HeapWordSize;
+     // At this point free and usable_free are in words; min_fill_size() is also expressed in words.
+     if ((free != usable_free) && (free - usable_free < ShenandoahHeap::min_fill_size())) {
+       // We'll have to add another card's memory to the padding
+       if (usable_free > CardTable::card_size_in_words()) {
+         usable_free -= CardTable::card_size_in_words();
+       } else {
+         assert(usable_free == 0, "usable_free is a multiple of card_size and card_size > min_fill_size");
+       }
+     }
+     assert(size % CardTable::card_size_in_words() == 0, "PLAB size must be multiple of remembered set card size");
+     if (size <= usable_free) {
+       result = r->allocate_aligned(size, req, CardTable::card_size());
+       size = req.actual_size();
+       assert(result != nullptr, "Allocation cannot fail");
+       assert(r->top() <= r->end(), "Allocation cannot span end of region");
+       assert(req.actual_size() % CardTable::card_size_in_words() == 0, "PLAB size must be multiple of card size");
+       assert(((uintptr_t) result) % CardTable::card_size_in_words() == 0, "PLAB start must align with card boundary");
+       if (free > usable_free) {
+         // Account for the alignment padding
+         size_t padding = (free - usable_free) * HeapWordSize;
+         increase_used(padding);
+         assert(r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION, "All PLABs reside in old-gen");
+         _heap->old_generation()->increase_used(padding);
+         // For verification consistency, we need to report this padding to _heap
+         _heap->increase_used(padding);
+       }
      }
    } else {
-     result = r->allocate(size, req.type());
+     result = r->allocate(size, req);
+     if (result != nullptr) {
+       // Record actual allocation size
+       req.set_actual_size(size);
+     }
    }
  
    if (result != NULL) {
      // Allocation successful, bump stats:
      if (req.is_mutator_alloc()) {
+       // Mutator allocations always pull from young gen.
+       _heap->young_generation()->increase_used(size * HeapWordSize);
        increase_used(size * HeapWordSize);
-     }
- 
-     // Record actual allocation size
-     req.set_actual_size(size);
- 
-     if (req.is_gc_alloc()) {
+     } else {
+       assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc");
+ 
+       // For GC allocations, we advance update_watermark because the objects relocated into this memory during
+       // evacuation are not updated during evacuation.  For both young and old regions r, it is essential that all
+       // PLABs be made parsable at the end of evacuation.  This is enabled by retiring all PLABs at the end of evacuation.
+       // TODO: Making a PLAB parsable involves placing a filler object in its remnant memory but does not require
+       // that the PLAB be disabled for all future purposes.  We may want to introduce a new service to make the
+       // PLABs parsable while still allowing the PLAB to serve future allocation requests that arise during the
+       // next evacuation pass.
        r->set_update_watermark(r->top());
+ 
+       if (r->affiliation() == ShenandoahRegionAffiliation::YOUNG_GENERATION) {
+         _heap->young_generation()->increase_used(size * HeapWordSize);
+       } else {
+         assert(r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION, "GC Alloc was not YOUNG so must be OLD");
+         assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "old-gen allocations use PLAB or shared allocation");
+         _heap->old_generation()->increase_used(size * HeapWordSize);
+         // For PLABs, we'll sort out the difference between evacuation and promotion usage when we retire the PLAB.
+       }
      }
    }
- 
    if (result == NULL || has_no_alloc_capacity(r)) {
      // Region cannot afford this or future allocations. Retire it.
      //
      // While this seems a bit harsh, especially in the case when this large allocation does not
      // fit, but the next small one would, we are risking to inflate scan times when lots of
-     // almost-full regions precede the fully-empty region where we want allocate the entire TLAB.
-     // TODO: Record first fully-empty region, and use that for large allocations
+     // almost-full regions precede the fully-empty region where we want to allocate the entire TLAB.
+     // TODO: Record first fully-empty region, and use that for large allocations and/or organize
+     // available free segments within regions for more efficient searches for "good fit".
  
      // Record the remainder as allocation waste
      if (req.is_mutator_alloc()) {
        size_t waste = r->free();
        if (waste > 0) {
          increase_used(waste);
+         _heap->generation_for(req.affiliation())->increase_allocated(waste);
          _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
        }
      }
  
      size_t num = r->index();

@@ -246,12 +468,15 @@
    shenandoah_assert_heaplocked();
  
    size_t words_size = req.size();
    size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
  
+   assert(req.affiliation() == ShenandoahRegionAffiliation::YOUNG_GENERATION, "Humongous regions are always allocated in YOUNG");
+   size_t avail_young_regions = _heap->young_generation()->adjusted_unaffiliated_regions();
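+   // In generational mode, a humongous allocation must fit within the young generation's remaining budget of
+   // unaffiliated regions as well as within the mutator free view; both conditions are checked below.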
+ 
    // No regions left to satisfy allocation, bye.
-   if (num > mutator_count()) {
+   if (num > mutator_count() || (num > avail_young_regions)) {
      return NULL;
    }
  
    // Find the continuous interval of $num regions, starting from $beg and ending in $end,
    // inclusive. Contiguous allocations are biased to the beginning.

@@ -280,10 +505,11 @@
  
      end++;
    };
  
    size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
+   ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
  
    // Initialize regions:
    for (size_t i = beg; i <= end; i++) {
      ShenandoahHeapRegion* r = _heap->get_region(i);
      try_recycle_trashed(r);

@@ -303,30 +529,49 @@
        used_words = remainder;
      } else {
        used_words = ShenandoahHeapRegion::region_size_words();
      }
  
-     r->set_top(r->bottom() + used_words);
+     r->set_affiliation(req.affiliation());
+     r->set_update_watermark(r->bottom());
+     r->set_top(r->bottom());    // Set top to bottom so we can capture TAMS
+     ctx->capture_top_at_mark_start(r);
+     r->set_top(r->bottom() + used_words); // Then change top to reflect allocation of humongous object.
+     assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
+     assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");
+ 
+     // Leave top_bitmap alone.  The first time a heap region is put into service, top_bitmap should equal end.
+     // Thereafter, it should represent the upper bound on parts of the bitmap that need to be cleared.
+     // ctx->clear_bitmap(r);
+     log_debug(gc)("NOT clearing bitmap for Humongous region [" PTR_FORMAT ", " PTR_FORMAT "], top_bitmap: "
+                   PTR_FORMAT " at transition from FREE to %s",
+                   p2i(r->bottom()), p2i(r->end()), p2i(ctx->top_bitmap(r)), affiliation_name(req.affiliation()));
  
      _mutator_free_bitmap.clear_bit(r->index());
    }
  
    // While individual regions report their true use, all humongous regions are
    // marked used in the free set.
    increase_used(ShenandoahHeapRegion::region_size_bytes() * num);
+   if (req.affiliation() == ShenandoahRegionAffiliation::YOUNG_GENERATION) {
+     _heap->young_generation()->increase_used(words_size * HeapWordSize);
+   } else if (req.affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION) {
+     _heap->old_generation()->increase_used(words_size * HeapWordSize);
+   }
  
    if (remainder != 0) {
      // Record this remainder as allocation waste
-     _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
+     size_t waste = ShenandoahHeapRegion::region_size_words() - remainder;
+     _heap->notify_mutator_alloc_words(waste, true);
+     _heap->generation_for(req.affiliation())->increase_allocated(waste * HeapWordSize);
    }
  
    // Allocated at left/rightmost? Move the bounds appropriately.
    if (beg == _mutator_leftmost || end == _mutator_rightmost) {
      adjust_bounds();
    }
    assert_bounds();
- 
    req.set_actual_size(words_size);
    return _heap->get_region(beg)->bottom();
  }
  
  bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) {

@@ -354,11 +599,10 @@
  }
  
  void ShenandoahFreeSet::recycle_trash() {
    // lock is not reentrable, check we don't have it
    shenandoah_assert_not_heaplocked();
- 
    for (size_t i = 0; i < _heap->num_regions(); i++) {
      ShenandoahHeapRegion* r = _heap->get_region(i);
      if (r->is_trash()) {
        ShenandoahHeapLocker locker(_heap->lock());
        try_recycle_trashed(r);

@@ -382,10 +626,14 @@
  
    if (touches_bounds(idx)) {
      adjust_bounds();
    }
    assert_bounds();
+ 
+   // We do not ensure that the region is no longer trash,
+   // relying on try_allocate_in(), which always comes next,
+   // to recycle trash before attempting to allocate anything in the region.
  }
  
  void ShenandoahFreeSet::clear() {
    shenandoah_assert_heaplocked();
    clear_internal();

@@ -404,10 +652,11 @@
  
  void ShenandoahFreeSet::rebuild() {
    shenandoah_assert_heaplocked();
    clear();
  
+   log_debug(gc)("Rebuilding FreeSet");
    for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
      ShenandoahHeapRegion* region = _heap->get_region(idx);
      if (region->is_alloc_allowed() || region->is_trash()) {
        assert(!region->is_cset(), "Shouldn't be adding those to the free set");
  

@@ -417,15 +666,36 @@
        _capacity += alloc_capacity(region);
        assert(_used <= _capacity, "must not use more than we have");
  
        assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
        _mutator_free_bitmap.set_bit(idx);
+ 
+       log_debug(gc)("  Setting Region " SIZE_FORMAT " _mutator_free_bitmap bit to true", idx);
      }
    }
  
    // Evac reserve: reserve trailing space for evacuations
-   size_t to_reserve = _heap->max_capacity() / 100 * ShenandoahEvacReserve;
+   if (!_heap->mode()->is_generational()) {
+     size_t to_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
+     reserve_regions(to_reserve);
+   } else {
+     size_t young_reserve = (_heap->young_generation()->max_capacity() / 100) * ShenandoahEvacReserve;
+     // Note that all allocations from old-gen are performed by GC, generally using PLABs for both
+     // promotions and evacuations.  The partition between which old memory is reserved for evacuation and
+     // which is reserved for promotion is enforced using thread-local variables that prescribe intentions within
+     // each PLAB.  We do not reserve any old-gen memory here, in order to facilitate loaning old-gen memory
+     // for young-gen purposes.
+     size_t old_reserve = 0;
+     size_t to_reserve = young_reserve + old_reserve;
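+     // For example, assuming the default ShenandoahEvacReserve of 5: a 10 GB young generation yields a
+     // young_reserve of 512 MB, and since old_reserve is zero, to_reserve is 512 MB as well.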
+     reserve_regions(to_reserve);
+   }
+ 
+   recompute_bounds();
+   assert_bounds();
+ }
+ 
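+ // Transfer eligible regions from the mutator view to the collector view, scanning from the highest-indexed
+ // region downward, until at least to_reserve bytes of allocation capacity have been moved.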
+ void ShenandoahFreeSet::reserve_regions(size_t to_reserve) {
    size_t reserved = 0;
  
    for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
      if (reserved >= to_reserve) break;
  

@@ -434,15 +704,13 @@
        _mutator_free_bitmap.clear_bit(idx);
        _collector_free_bitmap.set_bit(idx);
        size_t ac = alloc_capacity(region);
        _capacity -= ac;
        reserved += ac;
+       log_debug(gc)("  Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
      }
    }
- 
-   recompute_bounds();
-   assert_bounds();
  }
  
  void ShenandoahFreeSet::log_status() {
    shenandoah_assert_heaplocked();
  

@@ -536,16 +804,18 @@
  
  HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
    shenandoah_assert_heaplocked();
    assert_bounds();
  
+   // Allocation request is known to satisfy all memory budgeting constraints.
    if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
      switch (req.type()) {
        case ShenandoahAllocRequest::_alloc_shared:
        case ShenandoahAllocRequest::_alloc_shared_gc:
          in_new_region = true;
          return allocate_contiguous(req);
+       case ShenandoahAllocRequest::_alloc_plab:
        case ShenandoahAllocRequest::_alloc_gclab:
        case ShenandoahAllocRequest::_alloc_tlab:
          in_new_region = false;
          assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
                 req.size(), ShenandoahHeapRegion::humongous_threshold_words());
< prev index next >